Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-12-02 20:42:04 +00:00)

Merge branch 'master' into surahman-CH-31221-AWS-S3-object-version-id

Commit: b371251e80
.clang-tidy (29 changed lines)
@@ -1,9 +1,8 @@
Checks: '-*,
misc-throw-by-value-catch-by-reference,
misc-misplaced-const,
misc-unconventional-assign-operator,
misc-redundant-expression,
misc-static-assert,
misc-throw-by-value-catch-by-reference,
misc-unconventional-assign-operator,
misc-uniqueptr-reset-release,
misc-unused-alias-decls,
@@ -18,22 +17,22 @@ Checks: '-*,
modernize-redundant-void-arg,
modernize-replace-random-shuffle,
modernize-use-bool-literals,
modernize-use-nullptr,
modernize-use-using,
modernize-use-equals-default,
modernize-use-equals-delete,
modernize-use-nullptr,
modernize-use-using,

performance-faster-string-find,
performance-for-range-copy,
performance-implicit-conversion-in-loop,
performance-inefficient-algorithm,
performance-inefficient-vector-operation,
performance-move-const-arg,
performance-move-constructor-init,
performance-no-automatic-move,
performance-noexcept-move-constructor,
performance-trivially-destructible,
performance-unnecessary-copy-initialization,
performance-noexcept-move-constructor,
performance-move-const-arg,

readability-avoid-const-params-in-decls,
readability-const-return-type,
@@ -42,6 +41,8 @@ Checks: '-*,
readability-convert-member-functions-to-static,
readability-delete-null-pointer,
readability-deleted-default,
readability-identifier-naming,
readability-inconsistent-declaration-parameter-name,
readability-make-member-function-const,
readability-misplaced-array-index,
readability-non-const-parameter,
@@ -49,26 +50,23 @@ Checks: '-*,
readability-redundant-access-specifiers,
readability-redundant-control-flow,
readability-redundant-function-ptr-dereference,
readability-redundant-member-init,
readability-redundant-smartptr-get,
readability-redundant-string-cstr,
readability-redundant-string-init,
readability-simplify-boolean-expr,
readability-simplify-subscript-expr,
readability-static-definition-in-anonymous-namespace,
readability-string-compare,
readability-uniqueptr-delete-release,
readability-redundant-member-init,
readability-simplify-subscript-expr,
readability-simplify-boolean-expr,
readability-inconsistent-declaration-parameter-name,
readability-identifier-naming,

bugprone-undelegated-constructor,
bugprone-argument-comment,
bugprone-bad-signal-to-kill-thread,
bugprone-bool-pointer-implicit-conversion,
bugprone-copy-constructor-init,
bugprone-dangling-handle,
bugprone-forward-declaration-namespace,
bugprone-fold-init-type,
bugprone-forward-declaration-namespace,
bugprone-inaccurate-erase,
bugprone-incorrect-roundings,
bugprone-infinite-loop,
@@ -99,6 +97,7 @@ Checks: '-*,
bugprone-throw-keyword-missing,
bugprone-too-small-loop-variable,
bugprone-undefined-memory-manipulation,
bugprone-undelegated-constructor,
bugprone-unhandled-self-assignment,
bugprone-unused-raii,
bugprone-unused-return-value,
@@ -119,8 +118,8 @@ Checks: '-*,
google-build-namespaces,
google-default-arguments,
google-explicit-constructor,
google-readability-casting,
google-readability-avoid-underscore-in-googletest-name,
google-readability-casting,
google-runtime-int,
google-runtime-operator,

@@ -139,12 +138,12 @@ Checks: '-*,
clang-analyzer-core.uninitialized.CapturedBlockVariable,
clang-analyzer-core.uninitialized.UndefReturn,
clang-analyzer-cplusplus.InnerPointer,
clang-analyzer-cplusplus.Move,
clang-analyzer-cplusplus.NewDelete,
clang-analyzer-cplusplus.NewDeleteLeaks,
clang-analyzer-cplusplus.PlacementNewChecker,
clang-analyzer-cplusplus.SelfAssignment,
clang-analyzer-deadcode.DeadStores,
clang-analyzer-cplusplus.Move,
clang-analyzer-optin.cplusplus.UninitializedObject,
clang-analyzer-optin.cplusplus.VirtualCall,
clang-analyzer-security.insecureAPI.UncheckedReturn,
CHANGELOG.md (153 changed lines)
@@ -1,9 +1,162 @@
### Table of Contents
**[ClickHouse release v22.4, 2022-04-20](#224)**<br>
**[ClickHouse release v22.3-lts, 2022-03-17](#223)**<br>
**[ClickHouse release v22.2, 2022-02-17](#222)**<br>
**[ClickHouse release v22.1, 2022-01-18](#221)**<br>
**[Changelog for 2021](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/whats-new/changelog/2021.md)**<br>

### <a id="224"></a> ClickHouse release master FIXME as compared to v22.3.3.44-lts

#### Backward Incompatible Change

* Do not allow SETTINGS after FORMAT for INSERT queries (there is compatibility setting `parser_settings_after_format_compact` to accept such queries, but it is turned OFF by default). [#35883](https://github.com/ClickHouse/ClickHouse/pull/35883) ([Azat Khuzhin](https://github.com/azat)).
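  For illustration only (the table `t` is hypothetical), the accepted placement now keeps `SETTINGS` before `FORMAT`:
  ```sql
  -- Previously tolerated, now rejected by default: SETTINGS written after FORMAT.
  -- INSERT INTO t FORMAT Values SETTINGS max_threads = 1 (1);

  -- Put SETTINGS before FORMAT instead:
  INSERT INTO t SETTINGS max_threads = 1 FORMAT Values (1);
  ```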
* Function `yandexConsistentHash` (consistent hashing algorithm by Konstantin "kostik" Oblakov) is renamed to `kostikConsistentHash`. The old name is left as an alias for compatibility. Although this change is backward compatible, we may remove the alias in subsequent releases, that's why it's recommended to update the usages of this function in your apps. [#35553](https://github.com/ClickHouse/ClickHouse/pull/35553) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### New Feature

* Added INTERPOLATE extension to the ORDER BY ... WITH FILL. Closes [#34903](https://github.com/ClickHouse/ClickHouse/issues/34903). [#35349](https://github.com/ClickHouse/ClickHouse/pull/35349) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
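  A minimal illustrative sketch (column names are made up, not taken from the PR):
  ```sql
  SELECT number AS n, n * 10 AS v
  FROM numbers(3)
  ORDER BY n WITH FILL FROM 0 TO 6
  INTERPOLATE (v AS v + 10);  -- filled rows continue v using the given expression
  ```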
* Profiling on Processors level (under `log_processors_profiles` setting, ClickHouse will write time that processor spent during execution/waiting for data to `system.processors_profile_log` table). [#34355](https://github.com/ClickHouse/ClickHouse/pull/34355) ([Azat Khuzhin](https://github.com/azat)).
* Added functions makeDate(year, month, day), makeDate32(year, month, day). [#35628](https://github.com/ClickHouse/ClickHouse/pull/35628) ([Alexander Gololobov](https://github.com/davenger)). Implementation of makeDateTime() and makeDateTime64(). [#35934](https://github.com/ClickHouse/ClickHouse/pull/35934) ([Alexander Gololobov](https://github.com/davenger)).
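  For example (illustrative values):
  ```sql
  SELECT
      makeDate(2022, 4, 20)                       AS d,    -- Date
      makeDate32(2022, 4, 20)                     AS d32,  -- Date32
      makeDateTime(2022, 4, 20, 12, 30, 0)        AS dt,   -- DateTime
      makeDateTime64(2022, 4, 20, 12, 30, 0, 123) AS dt64; -- DateTime64 with a fractional part
  ```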
* Support new type of quota `WRITTEN BYTES` to limit amount of written bytes during insert queries. [#35736](https://github.com/ClickHouse/ClickHouse/pull/35736) ([Anton Popov](https://github.com/CurtizJ)).
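  A sketch of how such a quota could be declared, assuming the new limit is exposed under the name `written_bytes` (user name and numbers are made up):
  ```sql
  -- Cap the amount of data a user may write per hour.
  CREATE QUOTA insert_quota
      FOR INTERVAL 1 hour MAX written_bytes = 10000000
      TO some_user;
  ```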
* Added function `flattenTuple`. It receives a nested named `Tuple` as an argument and returns a flattened `Tuple` whose elements are the paths from the original `Tuple`. E.g.: `Tuple(a Int, Tuple(b Int, c Int)) -> Tuple(a Int, b Int, c Int)`. `flattenTuple` can be used to select all paths from type `Object` as separate columns. [#35690](https://github.com/ClickHouse/ClickHouse/pull/35690) ([Anton Popov](https://github.com/CurtizJ)).
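  A small illustration (hypothetical table `t_tuple`):
  ```sql
  CREATE TABLE t_tuple (t Tuple(a Int32, b Tuple(c Int32, d Int32))) ENGINE = Memory;
  INSERT INTO t_tuple VALUES ((1, (2, 3)));
  SELECT flattenTuple(t) FROM t_tuple;  -- (1, 2, 3): one element per leaf path of the original tuple
  ```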
* Added functions `arrayFirstOrNull`, `arrayLastOrNull`. Closes [#35238](https://github.com/ClickHouse/ClickHouse/issues/35238). [#35414](https://github.com/ClickHouse/ClickHouse/pull/35414) ([Maksim Kita](https://github.com/kitaisreal)).
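  For example:
  ```sql
  SELECT
      arrayFirstOrNull(x -> x > 2, [1, 2, 3, 4]) AS first_match,  -- 3
      arrayLastOrNull(x -> x > 10, [1, 2, 3])    AS no_match;     -- NULL instead of a default value
  ```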
* Added functions `minSampleSizeContinous` and `minSampleSizeConversion`. Author [achimbab](https://github.com/achimbab). [#35360](https://github.com/ClickHouse/ClickHouse/pull/35360) ([Maksim Kita](https://github.com/kitaisreal)).
* New functions minSampleSizeContinous and minSampleSizeConversion. [#34354](https://github.com/ClickHouse/ClickHouse/pull/34354) ([achimbab](https://github.com/achimbab)).
* Introduce format `ProtobufList` (all records as repeated messages in one Protobuf message). Closes [#16436](https://github.com/ClickHouse/ClickHouse/issues/16436). [#35152](https://github.com/ClickHouse/ClickHouse/pull/35152) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Add `h3PointDistM`, `h3PointDistKm`, `h3PointDistRads`, `h3GetRes0Indexes`, `h3GetPentagonIndexes` functions. [#34568](https://github.com/ClickHouse/ClickHouse/pull/34568) ([Bharat Nallan](https://github.com/bharatnc)).
* Add `toLastDayOfMonth` function which rounds up a date or date with time to the last day of the month. [#33501](https://github.com/ClickHouse/ClickHouse/issues/33501). [#34394](https://github.com/ClickHouse/ClickHouse/pull/34394) ([Habibullah Oladepo](https://github.com/holadepo)).
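  For example:
  ```sql
  SELECT toLastDayOfMonth(toDate('2022-02-11'));               -- 2022-02-28
  SELECT toLastDayOfMonth(toDateTime('2022-04-05 10:00:00'));  -- 2022-04-30
  ```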
* New aggregation function groupSortedArray to obtain an array of first N values. [#34055](https://github.com/ClickHouse/ClickHouse/pull/34055) ([palegre-tiny](https://github.com/palegre-tiny)).
* Added load balancing setting for \[Zoo\]Keeper client. Closes [#29617](https://github.com/ClickHouse/ClickHouse/issues/29617). [#30325](https://github.com/ClickHouse/ClickHouse/pull/30325) ([小路](https://github.com/nicelulu)).
* Add a new kind of row policies named `simple`. Before this PR we had two kinds of row policies: `permissive` and `restrictive`. A `simple` row policy adds a new filter on a table without the side effects that permissive and restrictive policies have. [#35345](https://github.com/ClickHouse/ClickHouse/pull/35345) ([Vitaly Baranov](https://github.com/vitlibar)).
* Added an ability to specify cluster secret in replicated database. [#35333](https://github.com/ClickHouse/ClickHouse/pull/35333) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Added sanity checks on server startup (available memory and disk space, max thread count, etc). [#34566](https://github.com/ClickHouse/ClickHouse/pull/34566) ([Sergei Trifonov](https://github.com/serxa)).
* INTERVAL improvement - can be used with `[MILLI|MICRO|NANO]SECOND`. Added `toStartOf[Milli|Micro|Nano]second()` functions. Added `[add|subtract][Milli|Micro|Nano]seconds()`. [#34353](https://github.com/ClickHouse/ClickHouse/pull/34353) ([Andrey Zvonov](https://github.com/zvonand)).
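  An illustrative sketch of the new sub-second units and helpers:
  ```sql
  SELECT
      now64(9) + INTERVAL 500 MILLISECOND AS plus_half_second,
      toStartOfMillisecond(now64(9))      AS ms_floor,
      addMicroseconds(now64(9), 250)      AS plus_250us;
  ```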

#### Experimental Feature

* Added support for transactions for simple `MergeTree` tables. This feature is highly experimental and not recommended for production. Part of [#22086](https://github.com/ClickHouse/ClickHouse/issues/22086). [#24258](https://github.com/ClickHouse/ClickHouse/pull/24258) ([tavplubix](https://github.com/tavplubix)).
* Support schema inference for type `Object` in format `JSONEachRow`. Allow to convert columns of type `Map` to columns of type `Object`. [#35629](https://github.com/ClickHouse/ClickHouse/pull/35629) ([Anton Popov](https://github.com/CurtizJ)).
* Allow to write remote FS cache on all write operations. Add `system.remote_filesystem_cache` table. Add `drop remote filesystem cache` query. Add introspection for s3 metadata with `system.remote_data_paths` table. Closes [#34021](https://github.com/ClickHouse/ClickHouse/issues/34021). Add cache option for merges by adding mode `read_from_filesystem_cache_if_exists_otherwise_bypass_cache` (turned on by default for merges and can also be turned on by query setting with the same name). Rename cache related settings (`remote_fs_enable_cache -> enable_filesystem_cache`, etc). [#35475](https://github.com/ClickHouse/ClickHouse/pull/35475) ([Kseniia Sumarokova](https://github.com/kssenii)).
* An option to store parts metadata in RocksDB. Speed up parts loading process of MergeTree to accelerate starting up of clickhouse-server. With this improvement, clickhouse-server was able to decrease starting up time from 75 minutes to 20 seconds, with 700k mergetree parts. [#32928](https://github.com/ClickHouse/ClickHouse/pull/32928) ([李扬](https://github.com/taiyang-li)).

#### Performance Improvement

* A new query plan optimization. Evaluate functions after `ORDER BY` when possible. As an example, for a query `SELECT sipHash64(number) FROM numbers(1e8) ORDER BY number LIMIT 5`, function `sipHash64` would be evaluated after `ORDER BY` and `LIMIT`, which gives ~20x speed up. [#35623](https://github.com/ClickHouse/ClickHouse/pull/35623) ([Nikita Taranov](https://github.com/nickitat)).
* Sizes of hash tables used during aggregation are now collected and used in later queries to avoid resizing hash tables. [#33439](https://github.com/ClickHouse/ClickHouse/pull/33439) ([Nikita Taranov](https://github.com/nickitat)).
* Improvement for hasAll function using SIMD instructions (SSE and AVX2). [#27653](https://github.com/ClickHouse/ClickHouse/pull/27653) ([youennL-cs](https://github.com/youennL-cs)). [#35723](https://github.com/ClickHouse/ClickHouse/pull/35723) ([Maksim Kita](https://github.com/kitaisreal)).
* Multiple changes to improve ASOF JOIN performance (1.2 - 1.6x as fast). It also adds support to use big integers. [#34733](https://github.com/ClickHouse/ClickHouse/pull/34733) ([Raúl Marín](https://github.com/Algunenano)).
* Improve performance of ASOF JOIN if key is native integer. [#35525](https://github.com/ClickHouse/ClickHouse/pull/35525) ([Maksim Kita](https://github.com/kitaisreal)).
* Parallelization of multipart upload into S3 storage. [#35343](https://github.com/ClickHouse/ClickHouse/pull/35343) ([Sergei Trifonov](https://github.com/serxa)).
* URL storage engine now downloads multiple chunks in parallel if the endpoint supports HTTP Range. Two additional settings were added, `max_download_threads` and `max_download_buffer_size`, which control maximum number of threads a single query can use to download the file and the maximum number of bytes each thread can process. [#35150](https://github.com/ClickHouse/ClickHouse/pull/35150) ([Antonio Andelic](https://github.com/antonio2368)).
* Use multiple threads to download objects from S3. Downloading is controllable using `max_download_threads` and `max_download_buffer_size` settings. [#35571](https://github.com/ClickHouse/ClickHouse/pull/35571) ([Antonio Andelic](https://github.com/antonio2368)).
* Narrow mutex scope when interacting with HDFS. Related to [#35292](https://github.com/ClickHouse/ClickHouse/issues/35292). [#35646](https://github.com/ClickHouse/ClickHouse/pull/35646) ([shuchaome](https://github.com/shuchaome)).
* Require mutations for per-table TTL only when it has been changed. [#35953](https://github.com/ClickHouse/ClickHouse/pull/35953) ([Azat Khuzhin](https://github.com/azat)).

#### Improvement

* Multiple improvements for schema inference. Use some tweaks and heuristics to determine numbers, strings, arrays, tuples and maps in CSV, TSV and TSVRaw data formats. Add setting `input_format_csv_use_best_effort_in_schema_inference` for CSV format that enables/disables using these heuristics, if it's disabled, we treat everything as string. Add similar setting `input_format_tsv_use_best_effort_in_schema_inference` for TSV/TSVRaw format. These settings are enabled by default. - Add Maps support for schema inference in Values format. - Fix possible segfault in schema inference in Values format. - Allow to skip columns with unsupported types in Arrow/ORC/Parquet formats. Add corresponding settings for it: `input_format_{parquet|orc|arrow}_skip_columns_with_unsupported_types_in_schema_inference`. These settings are disabled by default. - Allow to convert a column with type Null to a Nullable column with all NULL values in Arrow/Parquet formats. - Allow to specify column names in schema inference via setting `column_names_for_schema_inference` for formats that don't contain column names (like CSV, TSV, JSONCompactEachRow, etc) - Fix schema inference in ORC/Arrow/Parquet formats in terms of working with Nullable columns. Previously all inferred types were not Nullable and it blocked reading Nullable columns from data, now it's fixed and all inferred types are always Nullable (because we cannot understand that column is Nullable or not by reading the schema). - Fix schema inference in Template format with CSV escaping rules. [#35582](https://github.com/ClickHouse/ClickHouse/pull/35582) ([Kruglov Pavel](https://github.com/Avogar)).
* Add parallel parsing and schema inference for format `JSONAsObject`. [#35592](https://github.com/ClickHouse/ClickHouse/pull/35592) ([Anton Popov](https://github.com/CurtizJ)).
* Added support for automatic schema inference to the `s3Cluster` table function. Synced the signatures of `s3` and `s3Cluster`. [#35544](https://github.com/ClickHouse/ClickHouse/pull/35544) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Added support for schema inference for `hdfsCluster`. [#35602](https://github.com/ClickHouse/ClickHouse/pull/35602) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Add new setting `input_format_json_read_bools_as_numbers` that allows to infer and parse bools as numbers in JSON input formats. It's enabled by default. Suggested by @alexey-milovidov. [#35735](https://github.com/ClickHouse/ClickHouse/pull/35735) ([Kruglov Pavel](https://github.com/Avogar)).
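  A sketch of the effect, assuming a hypothetical table `t_json` with a numeric column `x`:
  ```sql
  SET input_format_json_read_bools_as_numbers = 1;
  -- true/false in the JSON input are accepted for the numeric column and read as 1/0.
  INSERT INTO t_json FORMAT JSONEachRow {"x": true} {"x": 0}
  ```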
* Improve columns ordering in schema inference for formats TSKV and JSONEachRow, closes [#35640](https://github.com/ClickHouse/ClickHouse/issues/35640). Don't stop schema inference when reading empty row in schema inference for formats TSKV and JSONEachRow. [#35724](https://github.com/ClickHouse/ClickHouse/pull/35724) ([Kruglov Pavel](https://github.com/Avogar)).
* Add settings `input_format_orc_case_insensitive_column_matching`, `input_format_arrow_case_insensitive_column_matching`, and `input_format_parquet_case_insensitive_column_matching` which allows ClickHouse to use case insensitive matching of columns while reading data from ORC, Arrow or Parquet files. [#35459](https://github.com/ClickHouse/ClickHouse/pull/35459) ([Antonio Andelic](https://github.com/antonio2368)).
* Added `is_secure` column to `system.query_log` which denotes if the client is using a secure connection over TCP or HTTP. [#35705](https://github.com/ClickHouse/ClickHouse/pull/35705) ([Antonio Andelic](https://github.com/antonio2368)).
* Now `kafka_num_consumers` can be bigger than the number of physical cores on low-resource machines (fewer than 16 cores). [#35926](https://github.com/ClickHouse/ClickHouse/pull/35926) ([alesapin](https://github.com/alesapin)).
* Add some basic metrics to monitor engine=Kafka tables. [#35916](https://github.com/ClickHouse/ClickHouse/pull/35916) ([filimonov](https://github.com/filimonov)).
* Now it's not allowed to `ALTER TABLE ... RESET SETTING` for non-existing settings for MergeTree engines family. Fixes [#35816](https://github.com/ClickHouse/ClickHouse/issues/35816). [#35884](https://github.com/ClickHouse/ClickHouse/pull/35884) ([alesapin](https://github.com/alesapin)).
* Now some `ALTER MODIFY COLUMN` queries for `Arrays` and `Nullable` types can be done at metadata level without mutations. For example, alter from `Array(Enum8('Option1'=1))` to `Array(Enum8('Option1'=1, 'Option2'=2))`. [#35882](https://github.com/ClickHouse/ClickHouse/pull/35882) ([alesapin](https://github.com/alesapin)).
* Added an animation to the hourglass icon to indicate to the user that a query is running. [#35860](https://github.com/ClickHouse/ClickHouse/pull/35860) ([peledni](https://github.com/peledni)).
* Support `ALTER TABLE t DETACH PARTITION (ALL)`. [#35794](https://github.com/ClickHouse/ClickHouse/pull/35794) ([awakeljw](https://github.com/awakeljw)).
* Improve projection analysis to optimize trivial queries such as `count()`. [#35788](https://github.com/ClickHouse/ClickHouse/pull/35788) ([Amos Bird](https://github.com/amosbird)).
* Support schema inference for INSERT SELECT when using the `input` table function. Get the schema from the insertion table instead of inferring it from the data in case of INSERT SELECT from table functions that support schema inference. Closes [#35639](https://github.com/ClickHouse/ClickHouse/issues/35639). [#35760](https://github.com/ClickHouse/ClickHouse/pull/35760) ([Kruglov Pavel](https://github.com/Avogar)).
* Respect `remote_url_allow_hosts` for Hive tables. [#35743](https://github.com/ClickHouse/ClickHouse/pull/35743) ([李扬](https://github.com/taiyang-li)).
* Implement `send_logs_level` for clickhouse-local. Closes [#35653](https://github.com/ClickHouse/ClickHouse/issues/35653). [#35716](https://github.com/ClickHouse/ClickHouse/pull/35716) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Allow `EPHEMERAL` columns without an explicit default expression. Closes [#35641](https://github.com/ClickHouse/ClickHouse/issues/35641). [#35706](https://github.com/ClickHouse/ClickHouse/pull/35706) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
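  A sketch of a column declared `EPHEMERAL` with no explicit default expression (table and column names are made up):
  ```sql
  CREATE TABLE t_ephemeral
  (
      raw String EPHEMERAL,            -- consumed only at INSERT time, not stored
      len UInt64 DEFAULT length(raw)
  )
  ENGINE = MergeTree ORDER BY len;

  INSERT INTO t_ephemeral (raw) VALUES ('hello');  -- stores len = 5
  ```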
* Add profile event counter `AsyncInsertBytes` about size of async INSERTs. [#35644](https://github.com/ClickHouse/ClickHouse/pull/35644) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve the pipeline description for JOIN. [#35612](https://github.com/ClickHouse/ClickHouse/pull/35612) ([何李夫](https://github.com/helifu)).
* Deduce absolute hdfs config path. [#35572](https://github.com/ClickHouse/ClickHouse/pull/35572) ([李扬](https://github.com/taiyang-li)).
* Improve pasting performance and compatibility of clickhouse-client. This helps [#35501](https://github.com/ClickHouse/ClickHouse/issues/35501). [#35541](https://github.com/ClickHouse/ClickHouse/pull/35541) ([Amos Bird](https://github.com/amosbird)).
* It was possible to get stack overflow in distributed queries if one of the settings `async_socket_for_remote` and `use_hedged_requests` is enabled while parsing very deeply nested data type (at least in debug build). Closes [#35509](https://github.com/ClickHouse/ClickHouse/issues/35509). [#35524](https://github.com/ClickHouse/ClickHouse/pull/35524) ([Kruglov Pavel](https://github.com/Avogar)).
* Add sizes of subcolumns to `system.parts_columns` table. [#35488](https://github.com/ClickHouse/ClickHouse/pull/35488) ([Anton Popov](https://github.com/CurtizJ)).
* Add explicit table info to the scan node of query plan and pipeline. [#35460](https://github.com/ClickHouse/ClickHouse/pull/35460) ([何李夫](https://github.com/helifu)).
* Allow server to bind to low-numbered ports (e.g. 443). ClickHouse installation script will set `cap_net_bind_service` to the binary file. [#35451](https://github.com/ClickHouse/ClickHouse/pull/35451) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix INSERT INTO table FROM INFILE: it did not display the progress bar. [#35429](https://github.com/ClickHouse/ClickHouse/pull/35429) ([xiedeyantu](https://github.com/xiedeyantu)).
* Add arguments `--user`, `--password`, `--host`, `--port` for `clickhouse-diagnostics` tool. [#35422](https://github.com/ClickHouse/ClickHouse/pull/35422) ([李扬](https://github.com/taiyang-li)).
* Support uuid for Postgres engines. Closes [#35384](https://github.com/ClickHouse/ClickHouse/issues/35384). [#35403](https://github.com/ClickHouse/ClickHouse/pull/35403) ([Kseniia Sumarokova](https://github.com/kssenii)).
* For the table functions `s3cluster`, `HDFSCluster`, and `hive`, we can't get the right `AccessType` by `StorageFactory::instance().getSourceAccessType(getStorageTypeName())`. This PR fixes it. [#35365](https://github.com/ClickHouse/ClickHouse/pull/35365) ([李扬](https://github.com/taiyang-li)).
* Remove `--testmode` option for clickhouse-client, enable it unconditionally. [#35354](https://github.com/ClickHouse/ClickHouse/pull/35354) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Don't allow `wchc` operation (four letter command) for clickhouse-keeper. [#35320](https://github.com/ClickHouse/ClickHouse/pull/35320) ([zhangyuli1](https://github.com/zhangyuli1)).
* Add function `getTypeSerializationStreams`. For a specified type (which is detected from column), it returns an array with all the serialization substream paths. This function is useful mainly for developers. [#35290](https://github.com/ClickHouse/ClickHouse/pull/35290) ([李扬](https://github.com/taiyang-li)).
* If `port` is not specified in cluster configuration, default server port will be used. This closes [#34769](https://github.com/ClickHouse/ClickHouse/issues/34769). [#34772](https://github.com/ClickHouse/ClickHouse/pull/34772) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Use `minmax` index for orc/parquet file in Hive Engine. Related PR: https://github.com/ClickHouse/arrow/pull/10. [#34631](https://github.com/ClickHouse/ClickHouse/pull/34631) ([李扬](https://github.com/taiyang-li)).
* System log tables now allow to specify COMMENT in ENGINE declaration. Closes [#33768](https://github.com/ClickHouse/ClickHouse/issues/33768). [#34536](https://github.com/ClickHouse/ClickHouse/pull/34536) ([Maksim Kita](https://github.com/kitaisreal)).
* Proper support of setting `max_rows_to_read` in case of reading in order of sorting key and specified limit. Previously the exception `Limit for rows or bytes to read exceeded` could be thrown even if the query actually requires reading fewer rows. [#33230](https://github.com/ClickHouse/ClickHouse/pull/33230) ([Anton Popov](https://github.com/CurtizJ)).
* Respect only quota & period from cgroups, ignore shares (which do not really limit the number of cores that can be used). [#35815](https://github.com/ClickHouse/ClickHouse/pull/35815) ([filimonov](https://github.com/filimonov)).

#### Build/Testing/Packaging Improvement

* Add next batch of randomization settings in functional tests. [#35047](https://github.com/ClickHouse/ClickHouse/pull/35047) ([Kruglov Pavel](https://github.com/Avogar)).
* Add backward compatibility check in stress test. Closes [#25088](https://github.com/ClickHouse/ClickHouse/issues/25088). [#27928](https://github.com/ClickHouse/ClickHouse/pull/27928) ([Kruglov Pavel](https://github.com/Avogar)).
* Migrate package building to `nfpm` - Deprecate `release` script in favor of `packages/build` - Build everything in clickhouse/binary-builder image (cleanup: clickhouse/deb-builder) - Add symbol stripping to cmake (todo: use $prefix/lib/$bin_dir/clickhouse/$binary.debug) - Fix issue with DWARF symbols - Add Alpine APK packages - Rename `alien` to `additional_pkgs`. [#33664](https://github.com/ClickHouse/ClickHouse/pull/33664) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add a night scan and upload for Coverity. [#34895](https://github.com/ClickHouse/ClickHouse/pull/34895) ([Boris Kuschel](https://github.com/bkuschel)).
* A dedicated small package for `clickhouse-keeper`. [#35308](https://github.com/ClickHouse/ClickHouse/pull/35308) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Running with podman was failing: it complained about specifying the same volume twice. [#35978](https://github.com/ClickHouse/ClickHouse/pull/35978) ([Roman Nikonov](https://github.com/nic11)).
* Minor improvement in contrib/krb5 build configuration. [#35832](https://github.com/ClickHouse/ClickHouse/pull/35832) ([Anton Kozlov](https://github.com/tonickkozlov)).
* Add a label to recognize a building task for every image. [#35583](https://github.com/ClickHouse/ClickHouse/pull/35583) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Apply `black` formatter to python code and add a per-commit check. [#35466](https://github.com/ClickHouse/ClickHouse/pull/35466) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Redo alpine image to use clean Dockerfile. Create a script in tests/ci to build both ubuntu and alpine images. Add clickhouse-keeper image (cc @nikitamikhaylov). Add build check to PullRequestCI. Add a job to a ReleaseCI. Add a job to MasterCI to build and push `clickhouse/clickhouse-server:head` and `clickhouse/clickhouse-keeper:head` images for each merged PR. [#35211](https://github.com/ClickHouse/ClickHouse/pull/35211) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix stress-test report in CI, now we upload the runlog with information about started stress tests only once. [#35093](https://github.com/ClickHouse/ClickHouse/pull/35093) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Switch to libcxx / libcxxabi from LLVM 14. [#34906](https://github.com/ClickHouse/ClickHouse/pull/34906) ([Raúl Marín](https://github.com/Algunenano)).
* Update unixodbc to mitigate CVE-2018-7485. Note: this CVE is not relevant for ClickHouse as it implements its own isolation layer for ODBC. [#35943](https://github.com/ClickHouse/ClickHouse/pull/35943) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix

* Added settings `input_format_ipv4_default_on_conversion_error`, `input_format_ipv6_default_on_conversion_error` to allow insert of invalid ip address values as default into tables. Closes [#35726](https://github.com/ClickHouse/ClickHouse/issues/35726). [#35733](https://github.com/ClickHouse/ClickHouse/pull/35733) ([Maksim Kita](https://github.com/kitaisreal)).
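  A sketch of the behaviour, assuming a hypothetical table `t_ips` with an `IPv4` column `addr`:
  ```sql
  SET input_format_ipv4_default_on_conversion_error = 1;
  -- An unparsable address is inserted as the default value (0.0.0.0) instead of failing the INSERT.
  INSERT INTO t_ips (addr) VALUES ('not-an-ip');
  ```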
* Avoid erasing columns from a block when they don't exist while reading data from Hive. [#35393](https://github.com/ClickHouse/ClickHouse/pull/35393) ([lgbo](https://github.com/lgbo-ustc)).
* Add type checking when creating materialized view. Close: [#23684](https://github.com/ClickHouse/ClickHouse/issues/23684). [#24896](https://github.com/ClickHouse/ClickHouse/pull/24896) ([hexiaoting](https://github.com/hexiaoting)).
* Fix formatting of INSERT INFILE queries (missing quotes). [#35886](https://github.com/ClickHouse/ClickHouse/pull/35886) ([Azat Khuzhin](https://github.com/azat)).
* Disable `session_log` because memory safety issue has been found by fuzzing. See [#35714](https://github.com/ClickHouse/ClickHouse/issues/35714). [#35873](https://github.com/ClickHouse/ClickHouse/pull/35873) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Avoid processing per-column TTL multiple times. [#35820](https://github.com/ClickHouse/ClickHouse/pull/35820) ([Azat Khuzhin](https://github.com/azat)).
* Fix inserts to columns of type `Object` in case when there is data related to several partitions in insert query. [#35806](https://github.com/ClickHouse/ClickHouse/pull/35806) ([Anton Popov](https://github.com/CurtizJ)).
* Fix a bug in the indexes of columns that are not present in `-WithNames` formats, which led to the error `INCORRECT_NUMBER_OF_COLUMNS` when the number of columns is more than 256. Closes [#35793](https://github.com/ClickHouse/ClickHouse/issues/35793). [#35803](https://github.com/ClickHouse/ClickHouse/pull/35803) ([Kruglov Pavel](https://github.com/Avogar)).
* Fixes [#35751](https://github.com/ClickHouse/ClickHouse/issues/35751). [#35799](https://github.com/ClickHouse/ClickHouse/pull/35799) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix for reading from HDFS in Snappy format. [#35771](https://github.com/ClickHouse/ClickHouse/pull/35771) ([shuchaome](https://github.com/shuchaome)).
* Fix bug in conversion from custom types to string that could lead to segfault or unexpected error messages. Closes [#35752](https://github.com/ClickHouse/ClickHouse/issues/35752). [#35755](https://github.com/ClickHouse/ClickHouse/pull/35755) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix any/all (subquery) implementation. Closes [#35489](https://github.com/ClickHouse/ClickHouse/issues/35489). [#35727](https://github.com/ClickHouse/ClickHouse/pull/35727) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix dropping non-empty database in clickhouse-local. Closes [#35692](https://github.com/ClickHouse/ClickHouse/issues/35692). [#35711](https://github.com/ClickHouse/ClickHouse/pull/35711) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix bug in creating materialized view with subquery after server restart. Materialized view was not getting updated after inserts into underlying table after server restart. Closes [#35511](https://github.com/ClickHouse/ClickHouse/issues/35511). [#35691](https://github.com/ClickHouse/ClickHouse/pull/35691) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix possible `Can't adjust last granule` exception while reading subcolumns of experimental type `Object`. [#35687](https://github.com/ClickHouse/ClickHouse/pull/35687) ([Anton Popov](https://github.com/CurtizJ)).
* Enable build with JIT compilation by default. [#35683](https://github.com/ClickHouse/ClickHouse/pull/35683) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix possible loss of subcolumns in experimental type `Object`. [#35682](https://github.com/ClickHouse/ClickHouse/pull/35682) ([Anton Popov](https://github.com/CurtizJ)).
* Fix check ASOF JOIN key nullability, close [#35565](https://github.com/ClickHouse/ClickHouse/issues/35565). [#35674](https://github.com/ClickHouse/ClickHouse/pull/35674) ([Vladimir C](https://github.com/vdimir)).
* Fix part checking logic for parts with projections. Error happened when projection and main part had different types. This is similar to https://github.com/ClickHouse/ClickHouse/pull/33774 . The bug is addressed by @caoyang10. [#35667](https://github.com/ClickHouse/ClickHouse/pull/35667) ([Amos Bird](https://github.com/amosbird)).
* Fix server crash when large number of arguments are passed into `format` function. Please refer to the test file and see how to reproduce the crash. [#35651](https://github.com/ClickHouse/ClickHouse/pull/35651) ([Amos Bird](https://github.com/amosbird)).
* Fix usage of quotas with asynchronous inserts. [#35645](https://github.com/ClickHouse/ClickHouse/pull/35645) ([Anton Popov](https://github.com/CurtizJ)).
* Fix positional arguments with aliases. Closes [#35600](https://github.com/ClickHouse/ClickHouse/issues/35600). [#35620](https://github.com/ClickHouse/ClickHouse/pull/35620) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Check `remote_url_allow_hosts` before schema inference in URL engine Closes [#35064](https://github.com/ClickHouse/ClickHouse/issues/35064). [#35619](https://github.com/ClickHouse/ClickHouse/pull/35619) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix `HashJoin` when columns with `LowCardinality` type are used. This closes [#35548](https://github.com/ClickHouse/ClickHouse/issues/35548). [#35616](https://github.com/ClickHouse/ClickHouse/pull/35616) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix possible segfault in MaterializedPostgreSQL which happened if exception occurred when data, collected in memory, was synced into underlying tables. Closes [#35611](https://github.com/ClickHouse/ClickHouse/issues/35611). [#35614](https://github.com/ClickHouse/ClickHouse/pull/35614) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Setting `database_atomic_wait_for_drop_and_detach_synchronously` worked incorrectly for `ATTACH TABLE` query when a previously detached table is still in use. It's fixed. [#35594](https://github.com/ClickHouse/ClickHouse/pull/35594) ([tavplubix](https://github.com/tavplubix)).
* Fix HTTP headers with named collections, add compression_method. Closes [#35273](https://github.com/ClickHouse/ClickHouse/issues/35273). Closes [#35269](https://github.com/ClickHouse/ClickHouse/issues/35269). [#35593](https://github.com/ClickHouse/ClickHouse/pull/35593) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix s3 engine getting virtual columns. Closes [#35411](https://github.com/ClickHouse/ClickHouse/issues/35411). [#35586](https://github.com/ClickHouse/ClickHouse/pull/35586) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixed return type deduction for `caseWithExpression`. The type of the ELSE branch is now correctly taken into account. [#35576](https://github.com/ClickHouse/ClickHouse/pull/35576) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix parsing of IPv6 addresses longer than 39 characters. Closes [#34022](https://github.com/ClickHouse/ClickHouse/issues/34022). [#35539](https://github.com/ClickHouse/ClickHouse/pull/35539) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix cast into IPv4, IPv6 address in IN section. Fixes [#35528](https://github.com/ClickHouse/ClickHouse/issues/35528). [#35534](https://github.com/ClickHouse/ClickHouse/pull/35534) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix crash during short circuit function evaluation when one of arguments is nullable constant. Closes [#35497](https://github.com/ClickHouse/ClickHouse/issues/35497). Closes [#35496](https://github.com/ClickHouse/ClickHouse/issues/35496). [#35502](https://github.com/ClickHouse/ClickHouse/pull/35502) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix crash for function `throwIf` with constant arguments. [#35500](https://github.com/ClickHouse/ClickHouse/pull/35500) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix bug in Keeper which can lead to unstable client connections. Introduced in [#35031](https://github.com/ClickHouse/ClickHouse/issues/35031). [#35498](https://github.com/ClickHouse/ClickHouse/pull/35498) ([alesapin](https://github.com/alesapin)).
* Fix bug in function `if` when resulting column type differs with resulting data type that led to logical errors like `Logical error: 'Bad cast from type DB::ColumnVector<int> to DB::ColumnVector<long>'.`. Closes [#35367](https://github.com/ClickHouse/ClickHouse/issues/35367). [#35476](https://github.com/ClickHouse/ClickHouse/pull/35476) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix excessive logging when using S3 as backend for MergeTree or as separate table engine/function. Fixes [#30559](https://github.com/ClickHouse/ClickHouse/issues/30559). [#35434](https://github.com/ClickHouse/ClickHouse/pull/35434) ([alesapin](https://github.com/alesapin)).
* Now merges executed with zero copy replication (experimental) will not spam logs with message `Found parts with the same min block and with the same max block as the missing part _ on replica _. Hoping that it will eventually appear as a result of a merge.`. [#35430](https://github.com/ClickHouse/ClickHouse/pull/35430) ([alesapin](https://github.com/alesapin)).
* Skip possible exception if empty chunks appear in GroupingAggregatedTransform. [#35417](https://github.com/ClickHouse/ClickHouse/pull/35417) ([Nikita Taranov](https://github.com/nickitat)).
* Fix working with columns that are not needed in query in Arrow/Parquet/ORC formats, it prevents possible errors like `Unsupported <format> type <type> of an input column <column_name>` when file contains column with unsupported type and we don't use it in query. [#35406](https://github.com/ClickHouse/ClickHouse/pull/35406) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix for local cache for remote filesystem (experimental feature) for high concurrency on corner cases. [#35381](https://github.com/ClickHouse/ClickHouse/pull/35381) ([Kseniia Sumarokova](https://github.com/kssenii)). Fix possible deadlock in cache. [#35378](https://github.com/ClickHouse/ClickHouse/pull/35378) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix partition pruning in case of comparison with constant in `WHERE`. If column and constant had different types, overflow was possible. Query could return an incorrect empty result. This fixes [#35304](https://github.com/ClickHouse/ClickHouse/issues/35304). [#35334](https://github.com/ClickHouse/ClickHouse/pull/35334) ([Amos Bird](https://github.com/amosbird)).
* Fix schema inference for TSKV format while using small max_read_buffer_size. [#35332](https://github.com/ClickHouse/ClickHouse/pull/35332) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix mutations in tables with enabled sparse columns. [#35284](https://github.com/ClickHouse/ClickHouse/pull/35284) ([Anton Popov](https://github.com/CurtizJ)).
* Do not delay final part writing by default (fixes possible `Memory limit exceeded` during `INSERT` by adding `max_insert_delayed_streams_for_parallel_write` with default to 1000 for writes to s3 and disabled as before otherwise). [#34780](https://github.com/ClickHouse/ClickHouse/pull/34780) ([Azat Khuzhin](https://github.com/azat)).


## <a id="223"></a> ClickHouse release v22.3-lts, 2022-03-17

@@ -73,7 +73,7 @@ replxx::Replxx::completions_t LineReader::Suggest::getCompletions(const String &
if (std::string::npos == last_word_pos)
last_word = prefix;
else
last_word = std::string_view(prefix).substr(last_word_pos + 1, std::string::npos);
last_word = std::string_view{prefix}.substr(last_word_pos + 1, std::string::npos);
/// last_word can be empty.

std::pair<Words::const_iterator, Words::const_iterator> range;
@@ -61,6 +61,7 @@

#if defined(OS_DARWIN)
# pragma GCC diagnostic ignored "-Wunused-macros"
// NOLINTNEXTLINE(bugprone-reserved-identifier)
# define _XOPEN_SOURCE 700 // ucontext is not available without _XOPEN_SOURCE
#endif
#include <ucontext.h>
@@ -132,7 +133,7 @@ static void signalHandler(int sig, siginfo_t * info, void * context)
DB::writePODBinary(*info, out);
DB::writePODBinary(signal_context, out);
DB::writePODBinary(stack_trace, out);
DB::writeBinary(UInt32(getThreadId()), out);
DB::writeBinary(static_cast<UInt32>(getThreadId()), out);
DB::writePODBinary(DB::current_thread, out);

out.next();
@@ -435,7 +436,7 @@ static void sanitizerDeathCallback()
DB::WriteBufferFromFileDescriptor out(signal_pipe.fds_rw[1], buf_size, buf);

DB::writeBinary(static_cast<int>(SignalListener::StdTerminate), out);
DB::writeBinary(UInt32(getThreadId()), out);
DB::writeBinary(static_cast<UInt32>(getThreadId()), out);
DB::writeBinary(log_message, out);
out.next();
@@ -103,7 +103,7 @@ void OwnSplitChannel::logSplit(const Poco::Message & msg)
columns[i++]->insert(DNSResolver::instance().getHostName());
columns[i++]->insert(msg_ext.query_id);
columns[i++]->insert(msg_ext.thread_id);
columns[i++]->insert(Int64(msg.getPriority()));
columns[i++]->insert(static_cast<Int64>(msg.getPriority()));
columns[i++]->insert(msg.getSource());
columns[i++]->insert(msg.getText());
@@ -2,11 +2,11 @@

# NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54461)
SET(VERSION_REVISION 54462)
SET(VERSION_MAJOR 22)
SET(VERSION_MINOR 4)
SET(VERSION_MINOR 5)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH 92ab33f560e638d1989c5ca543021ab53d110f5c)
SET(VERSION_DESCRIBE v22.4.1.1-testing)
SET(VERSION_STRING 22.4.1.1)
SET(VERSION_GITHASH 77a82cc090dd5dba2d995946e82a12a2cadaaff3)
SET(VERSION_DESCRIBE v22.5.1.1-testing)
SET(VERSION_STRING 22.5.1.1)
# end of autochange
@@ -15,11 +15,11 @@ fi
# current curl version options.
function curl_with_retry
{
for _ in 1 2 3 4; do
for _ in 1 2 3 4 5 6 7 8 9 10; do
if curl --fail --head "$1";then
return 0
else
sleep 0.5
sleep 1
fi
done
return 1
@@ -218,12 +218,12 @@ zgrep -Fav "ASan doesn't fully support makecontext/swapcontext functions" /test_
rm -f /test_output/tmp

# OOM
zgrep -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \
zgrep -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server*.log > /dev/null \
&& echo -e 'OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \
|| echo -e 'No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv

# Logical errors
zgrep -Fa "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.log* > /test_output/logical_errors.txt \
zgrep -Fa "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server*.log > /test_output/logical_errors.txt \
&& echo -e 'Logical error thrown (see clickhouse-server.log or logical_errors.txt)\tFAIL' >> /test_output/test_results.tsv \
|| echo -e 'No logical errors\tOK' >> /test_output/test_results.tsv

@@ -231,12 +231,12 @@ zgrep -Fa "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-serve
[ -s /test_output/logical_errors.txt ] || rm /test_output/logical_errors.txt

# Crash
zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \
zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server*.log > /dev/null \
&& echo -e 'Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \
|| echo -e 'Not crashed\tOK' >> /test_output/test_results.tsv

# It also checks for crash without stacktrace (printed by watchdog)
zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.log* > /test_output/fatal_messages.txt \
zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server*.log > /test_output/fatal_messages.txt \
&& echo -e 'Fatal message in clickhouse-server.log (see fatal_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
|| echo -e 'No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv

@@ -375,14 +375,6 @@ else
echo -e "Backward compatibility check: Failed to download previous release packets\tFAIL" >> /test_output/test_results.tsv
fi

# Put logs into /test_output/
for log_file in /var/log/clickhouse-server/clickhouse-server.log*
do
pigz < "${log_file}" > /test_output/"$(basename ${log_file})".gz
# FIXME: remove once only github actions will be left
rm "${log_file}"
done

tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
mv /var/log/clickhouse-server/stderr.log /test_output/
@@ -239,12 +239,12 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
uint32_t path_length = 0;
_NSGetExecutablePath(nullptr, &path_length);
if (path_length <= 1)
Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot obtain path to the binary");
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot obtain path to the binary");

std::string path(path_length, std::string::value_type());
auto res = _NSGetExecutablePath(&path[0], &path_length);
if (res != 0)
Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot obtain path to the binary");
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot obtain path to the binary");

if (path.back() == '\0')
path.pop_back();
@@ -334,11 +334,24 @@ struct Checker
#endif
;

/// NOTE: We will migrate to full static linking or our own dynamic loader to make this code obsolete.
void checkHarmfulEnvironmentVariables()
{
/// The list is a selection from "man ld-linux". And one variable that is Mac OS X specific.
/// NOTE: We will migrate to full static linking or our own dynamic loader to make this code obsolete.
for (const auto * var : {"LD_PRELOAD", "LD_LIBRARY_PATH", "LD_ORIGIN_PATH", "LD_AUDIT", "LD_DYNAMIC_WEAK", "DYLD_INSERT_LIBRARIES"})
std::initializer_list<const char *> harmful_env_variables = {
/// The list is a selection from "man ld-linux".
"LD_PRELOAD",
"LD_LIBRARY_PATH",
"LD_ORIGIN_PATH",
"LD_AUDIT",
"LD_DYNAMIC_WEAK",
/// The list is a selection from "man dyld" (osx).
"DYLD_LIBRARY_PATH",
"DYLD_FALLBACK_LIBRARY_PATH",
"DYLD_VERSIONED_LIBRARY_PATH",
"DYLD_INSERT_LIBRARIES",
};

for (const auto * var : harmful_env_variables)
{
if (const char * value = getenv(var); value && value[0])
{
@@ -549,7 +549,7 @@ private:

CodePoint sample(UInt64 random, double end_multiplier) const
{
UInt64 range = total + UInt64(count_end * end_multiplier);
UInt64 range = total + static_cast<UInt64>(count_end * end_multiplier);
if (range == 0)
return END;

@@ -728,7 +728,7 @@ public:
if (!histogram.total)
continue;

double average = double(histogram.total) / histogram.buckets.size();
double average = static_cast<double>(histogram.total) / histogram.buckets.size();

UInt64 new_total = 0;
for (auto & bucket : histogram.buckets)
@@ -1339,7 +1339,6 @@ int Server::main(const std::vector<std::string> & /*args*/)
global_context->getMergeTreeSettings().sanityCheck(settings);
global_context->getReplicatedMergeTreeSettings().sanityCheck(settings);


/// try set up encryption. There are some errors in config, error will be printed and server wouldn't start.
CompressionCodecEncrypted::Configuration::instance().load(config(), "encryption_codecs");
@@ -16,18 +16,6 @@
#include <Interpreters/Access/InterpreterGrantQuery.h>
#include <Interpreters/Access/InterpreterShowCreateAccessEntityQuery.h>
#include <Interpreters/Access/InterpreterShowGrantsQuery.h>
#include <Parsers/Access/ASTCreateQuotaQuery.h>
#include <Parsers/Access/ASTCreateRoleQuery.h>
#include <Parsers/Access/ASTCreateRowPolicyQuery.h>
#include <Parsers/Access/ASTCreateSettingsProfileQuery.h>
#include <Parsers/Access/ASTCreateUserQuery.h>
#include <Parsers/Access/ASTGrantQuery.h>
#include <Parsers/Access/ParserCreateQuotaQuery.h>
#include <Parsers/Access/ParserCreateRoleQuery.h>
#include <Parsers/Access/ParserCreateRowPolicyQuery.h>
#include <Parsers/Access/ParserCreateSettingsProfileQuery.h>
#include <Parsers/Access/ParserCreateUserQuery.h>
#include <Parsers/Access/ParserGrantQuery.h>
#include <Parsers/formatAST.h>
#include <Parsers/parseQuery.h>
#include <boost/range/algorithm/copy.hpp>
@@ -40,39 +28,6 @@ namespace ErrorCodes
extern const int INCORRECT_ACCESS_ENTITY_DEFINITION;
}

namespace
{
/// Special parser for the 'ATTACH access entity' queries.
class ParserAttachAccessEntity : public IParserBase
{
protected:
const char * getName() const override { return "ATTACH access entity query"; }

bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override
{
ParserCreateUserQuery create_user_p;
ParserCreateRoleQuery create_role_p;
ParserCreateRowPolicyQuery create_policy_p;
ParserCreateQuotaQuery create_quota_p;
ParserCreateSettingsProfileQuery create_profile_p;
ParserGrantQuery grant_p;

create_user_p.useAttachMode();
create_role_p.useAttachMode();
create_policy_p.useAttachMode();
create_quota_p.useAttachMode();
create_profile_p.useAttachMode();
grant_p.useAttachMode();

return create_user_p.parse(pos, node, expected) || create_role_p.parse(pos, node, expected)
|| create_policy_p.parse(pos, node, expected) || create_quota_p.parse(pos, node, expected)
|| create_profile_p.parse(pos, node, expected) || grant_p.parse(pos, node, expected);
}
};

}


String serializeAccessEntity(const IAccessEntity & entity)
{
/// Build list of ATTACH queries.
@@ -1,10 +1,51 @@
#pragma once

#include <Parsers/Access/ASTCreateQuotaQuery.h>
#include <Parsers/Access/ASTCreateRoleQuery.h>
#include <Parsers/Access/ASTCreateRowPolicyQuery.h>
#include <Parsers/Access/ASTCreateSettingsProfileQuery.h>
#include <Parsers/Access/ASTCreateUserQuery.h>
#include <Parsers/Access/ASTGrantQuery.h>
#include <Parsers/Access/ParserCreateQuotaQuery.h>
#include <Parsers/Access/ParserCreateRoleQuery.h>
#include <Parsers/Access/ParserCreateRowPolicyQuery.h>
#include <Parsers/Access/ParserCreateSettingsProfileQuery.h>
#include <Parsers/Access/ParserCreateUserQuery.h>
#include <Parsers/Access/ParserGrantQuery.h>
#include <base/types.h>
#include <memory>

namespace DB
{

/// Special parser for the 'ATTACH access entity' queries.
class ParserAttachAccessEntity : public IParserBase
{
protected:
const char * getName() const override { return "ATTACH access entity query"; }

bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override
{
ParserCreateUserQuery create_user_p;
ParserCreateRoleQuery create_role_p;
ParserCreateRowPolicyQuery create_policy_p;
ParserCreateQuotaQuery create_quota_p;
ParserCreateSettingsProfileQuery create_profile_p;
ParserGrantQuery grant_p;

create_user_p.useAttachMode();
create_role_p.useAttachMode();
create_policy_p.useAttachMode();
create_quota_p.useAttachMode();
create_profile_p.useAttachMode();
grant_p.useAttachMode();

return create_user_p.parse(pos, node, expected) || create_role_p.parse(pos, node, expected)
|| create_policy_p.parse(pos, node, expected) || create_quota_p.parse(pos, node, expected)
|| create_profile_p.parse(pos, node, expected) || grant_p.parse(pos, node, expected);
}
};

struct IAccessEntity;
using AccessEntityPtr = std::shared_ptr<const IAccessEntity>;
@@ -31,9 +31,9 @@ namespace
return (Util::encodeDoubleSHA1(password) == password_double_sha1);
}

bool checkPasswordSHA256(const std::string_view & password, const Digest & password_sha256)
bool checkPasswordSHA256(const std::string_view & password, const Digest & password_sha256, const String & salt)
{
return Util::encodeSHA256(password) == password_sha256;
return Util::encodeSHA256(String(password).append(salt)) == password_sha256;
}

bool checkPasswordDoubleSHA1MySQL(const std::string_view & scramble, const std::string_view & scrambled_password, const Digest & password_double_sha1)
@@ -132,7 +132,7 @@ bool Authentication::areCredentialsValid(const Credentials & credentials, const
return checkPasswordPlainText(basic_credentials->getPassword(), auth_data.getPasswordHashBinary());

case AuthenticationType::SHA256_PASSWORD:
return checkPasswordSHA256(basic_credentials->getPassword(), auth_data.getPasswordHashBinary());
return checkPasswordSHA256(basic_credentials->getPassword(), auth_data.getPasswordHashBinary(), auth_data.getSalt());

case AuthenticationType::DOUBLE_SHA1_PASSWORD:
return checkPasswordDoubleSHA1(basic_credentials->getPassword(), auth_data.getPasswordHashBinary());
@@ -210,6 +210,17 @@ void AuthenticationData::setPasswordHashBinary(const Digest & hash)
throw Exception("setPasswordHashBinary(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
}

void AuthenticationData::setSalt(String salt_)
{
if (type != AuthenticationType::SHA256_PASSWORD)
throw Exception("setSalt(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
salt = std::move(salt_);
}

String AuthenticationData::getSalt() const
{
return salt;
}

void AuthenticationData::setSSLCertificateCommonNames(boost::container::flat_set<String> common_names_)
{
@ -76,6 +76,10 @@ public:
|
||||
void setPasswordHashBinary(const Digest & hash);
|
||||
const Digest & getPasswordHashBinary() const { return password_hash; }
|
||||
|
||||
/// Sets the salt in String form.
|
||||
void setSalt(String salt);
|
||||
String getSalt() const;
|
||||
|
||||
/// Sets the server name for authentication type LDAP.
|
||||
const String & getLDAPServerName() const { return ldap_server_name; }
|
||||
void setLDAPServerName(const String & name) { ldap_server_name = name; }
|
||||
@ -106,6 +110,7 @@ private:
|
||||
String ldap_server_name;
|
||||
String kerberos_realm;
|
||||
boost::container::flat_set<String> ssl_certificate_common_names;
|
||||
String salt;
|
||||
};
|
||||
|
||||
}
|
||||
|
@@ -225,7 +225,7 @@ public:
throw Exception("Logical error: single argument is passed to AggregateFunctionIfNullVariadic", ErrorCodes::LOGICAL_ERROR);

if (number_of_arguments > MAX_ARGS)
throw Exception("Maximum number of arguments for aggregate function with Nullable types is " + toString(size_t(MAX_ARGS)),
throw Exception("Maximum number of arguments for aggregate function with Nullable types is " + toString(MAX_ARGS),
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

for (size_t i = 0; i < number_of_arguments; ++i)
@@ -359,7 +359,7 @@ private:
using Base = AggregateFunctionNullBase<result_is_nullable, serialize_flag,
AggregateFunctionIfNullVariadic<result_is_nullable, serialize_flag, null_is_skipped>>;

enum { MAX_ARGS = 8 };
static constexpr size_t MAX_ARGS = 8;
size_t number_of_arguments = 0;
std::array<char, MAX_ARGS> is_nullable; /// Plain array is better than std::vector due to one indirection less.
};

@@ -56,8 +56,8 @@ namespace

/// Such default parameters were picked because they did good on some tests,
/// though it still requires to fit parameters to achieve better result
auto learning_rate = Float64(1.0);
auto l2_reg_coef = Float64(0.5);
auto learning_rate = static_cast<Float64>(1.0);
auto l2_reg_coef = static_cast<Float64>(0.5);
UInt64 batch_size = 15;

std::string weights_updater_name = "Adam";

@@ -607,7 +607,7 @@ MutableColumns ColumnAggregateFunction::scatter(IColumn::ColumnIndex num_columns
size_t num_rows = size();

{
size_t reserve_size = double(num_rows) / num_columns * 1.1; /// 1.1 is just a guess. Better to use n-sigma rule.
size_t reserve_size = static_cast<double>(num_rows) / num_columns * 1.1; /// 1.1 is just a guess. Better to use n-sigma rule.

if (reserve_size > 1)
for (auto & column : columns)

@@ -81,7 +81,7 @@ namespace
if (max_val > size)
return mapUniqueIndexImplRef(index);

auto map_size = UInt64(max_val) + 1;
auto map_size = static_cast<UInt64>(max_val) + 1;
PaddedPODArray<T> map(map_size, 0);
T zero_pos_value = index[0];
index[0] = 0;
@@ -98,7 +98,7 @@ namespace
index[i] = map[val];
}

auto res_col = ColumnVector<T>::create(UInt64(cur_pos) + 1);
auto res_col = ColumnVector<T>::create(static_cast<UInt64>(cur_pos) + 1);
auto & data = res_col->getData();
data[0] = zero_pos_value;
for (size_t i = 0; i < map_size; ++i)

@@ -228,7 +228,7 @@ void ColumnVector<T>::getPermutation(IColumn::PermutationSortDirection direction
if (s >= 256 && s <= std::numeric_limits<UInt32>::max() && use_radix_sort)
{
PaddedPODArray<ValueWithIndex<T>> pairs(s);
for (UInt32 i = 0; i < UInt32(s); ++i)
for (UInt32 i = 0; i < static_cast<UInt32>(s); ++i)
pairs[i] = {data[i], i};

RadixSort<RadixSortTraits<T>>::executeLSD(pairs.data(), s, reverse, res.data());

@@ -82,7 +82,7 @@ void FieldVisitorWriteBinary::operator() (const Object & x, WriteBuffer & buf) c

void FieldVisitorWriteBinary::operator()(const bool & x, WriteBuffer & buf) const
{
writeBinary(UInt8(x), buf);
writeBinary(static_cast<UInt8>(x), buf);
}

}

@@ -561,6 +561,7 @@ void FileSegment::completeImpl(std::lock_guard<std::mutex> & cache_lock, std::lo
* in FileSegmentsHolder represent a contiguous range, so we can resize
* it only when nobody needs it.
*/
download_state = State::PARTIALLY_DOWNLOADED_NO_CONTINUATION;
LOG_TEST(log, "Resize cell {} to downloaded: {}", range().toString(), current_downloaded_size);
cache->reduceSizeToDownloaded(key(), offset(), cache_lock, segment_lock);
}
@@ -646,6 +647,13 @@ void FileSegment::assertNotDetached() const
throw Exception(ErrorCodes::LOGICAL_ERROR, "Operation not allowed, file segment is detached");
}

void FileSegment::assertDetachedStatus() const
{
assert(
(download_state == State::EMPTY) || (download_state == State::PARTIALLY_DOWNLOADED_NO_CONTINUATION)
|| (download_state == State::SKIP_CACHE));
}

FileSegmentPtr FileSegment::getSnapshot(const FileSegmentPtr & file_segment, std::lock_guard<std::mutex> & /* cache_lock */)
{
auto snapshot = std::make_shared<FileSegment>(
@@ -683,7 +691,7 @@ FileSegmentsHolder::~FileSegmentsHolder()
{
/// This file segment is not owned by cache, so it will be destructed
/// at this point, therefore no completion required.
assert(file_segment->state() == FileSegment::State::EMPTY);
file_segment->assertDetachedStatus();
file_segment_it = file_segments.erase(current_file_segment_it);
continue;
}
@@ -151,6 +151,8 @@ private:
String getInfoForLogImpl(std::lock_guard<std::mutex> & segment_lock) const;
void assertCorrectnessImpl(std::lock_guard<std::mutex> & segment_lock) const;
void assertNotDetached() const;
void assertDetachedStatus() const;


void setDownloaded(std::lock_guard<std::mutex> & segment_lock);
void setDownloadFailed(std::lock_guard<std::mutex> & segment_lock);

@@ -207,15 +207,15 @@ std::unique_ptr<ShellCommand> ShellCommand::executeImpl(

/// Replace the file descriptors with the ends of our pipes.
if (STDIN_FILENO != dup2(pipe_stdin.fds_rw[0], STDIN_FILENO))
_exit(int(ReturnCodes::CANNOT_DUP_STDIN));
_exit(static_cast<int>(ReturnCodes::CANNOT_DUP_STDIN));

if (!config.pipe_stdin_only)
{
if (STDOUT_FILENO != dup2(pipe_stdout.fds_rw[1], STDOUT_FILENO))
_exit(int(ReturnCodes::CANNOT_DUP_STDOUT));
_exit(static_cast<int>(ReturnCodes::CANNOT_DUP_STDOUT));

if (STDERR_FILENO != dup2(pipe_stderr.fds_rw[1], STDERR_FILENO))
_exit(int(ReturnCodes::CANNOT_DUP_STDERR));
_exit(static_cast<int>(ReturnCodes::CANNOT_DUP_STDERR));
}

for (size_t i = 0; i < config.read_fds.size(); ++i)
@@ -224,7 +224,7 @@ std::unique_ptr<ShellCommand> ShellCommand::executeImpl(
auto fd = config.read_fds[i];

if (fd != dup2(fds.fds_rw[1], fd))
_exit(int(ReturnCodes::CANNOT_DUP_READ_DESCRIPTOR));
_exit(static_cast<int>(ReturnCodes::CANNOT_DUP_READ_DESCRIPTOR));
}

for (size_t i = 0; i < config.write_fds.size(); ++i)
@@ -233,7 +233,7 @@ std::unique_ptr<ShellCommand> ShellCommand::executeImpl(
auto fd = config.write_fds[i];

if (fd != dup2(fds.fds_rw[0], fd))
_exit(int(ReturnCodes::CANNOT_DUP_WRITE_DESCRIPTOR));
_exit(static_cast<int>(ReturnCodes::CANNOT_DUP_WRITE_DESCRIPTOR));
}

// Reset the signal mask: it may be non-empty and will be inherited
@@ -246,7 +246,7 @@ std::unique_ptr<ShellCommand> ShellCommand::executeImpl(
execv(filename, argv);
/// If the process is running, then `execv` does not return here.

_exit(int(ReturnCodes::CANNOT_EXEC));
_exit(static_cast<int>(ReturnCodes::CANNOT_EXEC));
}

std::unique_ptr<ShellCommand> res(new ShellCommand(
@@ -356,17 +356,17 @@ void ShellCommand::wait()
{
switch (retcode)
{
case int(ReturnCodes::CANNOT_DUP_STDIN):
case static_cast<int>(ReturnCodes::CANNOT_DUP_STDIN):
throw Exception("Cannot dup2 stdin of child process", ErrorCodes::CANNOT_CREATE_CHILD_PROCESS);
case int(ReturnCodes::CANNOT_DUP_STDOUT):
case static_cast<int>(ReturnCodes::CANNOT_DUP_STDOUT):
throw Exception("Cannot dup2 stdout of child process", ErrorCodes::CANNOT_CREATE_CHILD_PROCESS);
case int(ReturnCodes::CANNOT_DUP_STDERR):
case static_cast<int>(ReturnCodes::CANNOT_DUP_STDERR):
throw Exception("Cannot dup2 stderr of child process", ErrorCodes::CANNOT_CREATE_CHILD_PROCESS);
case int(ReturnCodes::CANNOT_EXEC):
case static_cast<int>(ReturnCodes::CANNOT_EXEC):
throw Exception("Cannot execv in child process", ErrorCodes::CANNOT_CREATE_CHILD_PROCESS);
case int(ReturnCodes::CANNOT_DUP_READ_DESCRIPTOR):
case static_cast<int>(ReturnCodes::CANNOT_DUP_READ_DESCRIPTOR):
throw Exception("Cannot dup2 read descriptor of child process", ErrorCodes::CANNOT_CREATE_CHILD_PROCESS);
case int(ReturnCodes::CANNOT_DUP_WRITE_DESCRIPTOR):
case static_cast<int>(ReturnCodes::CANNOT_DUP_WRITE_DESCRIPTOR):
throw Exception("Cannot dup2 write descriptor of child process", ErrorCodes::CANNOT_CREATE_CHILD_PROCESS);
default:
throw Exception("Child process was exited with return code " + toString(retcode), ErrorCodes::CHILD_WAS_NOT_EXITED_NORMALLY);

@@ -154,7 +154,7 @@ ReturnType ThreadPoolImpl<Thread>::scheduleImpl(Job job, int priority, std::opti
new_job_or_shutdown.notify_one();
}

return ReturnType(true);
return static_cast<ReturnType>(true);
}

template <typename Thread>

@@ -64,7 +64,7 @@ void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Int

size_t stack_trace_size = stack_trace.getSize();
size_t stack_trace_offset = stack_trace.getOffset();
writeIntBinary(UInt8(stack_trace_size - stack_trace_offset), out);
writeIntBinary(static_cast<UInt8>(stack_trace_size - stack_trace_offset), out);
for (size_t i = stack_trace_offset; i < stack_trace_size; ++i)
writePODBinary(stack_trace.getFramePointers()[i], out);

@@ -514,7 +514,7 @@ void TestKeeper::processingThread()
{
RequestInfo info;

UInt64 max_wait = UInt64(operation_timeout.totalMilliseconds());
UInt64 max_wait = static_cast<UInt64>(operation_timeout.totalMilliseconds());
if (requests_queue.tryPop(info, max_wait))
{
if (expired)

@ -76,7 +76,7 @@ void ZooKeeper::init(const std::string & implementation_, const Strings & hosts_
|
||||
auto & host_string = host.host;
|
||||
try
|
||||
{
|
||||
bool secure = bool(startsWith(host_string, "secure://"));
|
||||
bool secure = startsWith(host_string, "secure://");
|
||||
|
||||
if (secure)
|
||||
host_string.erase(0, strlen("secure://"));
|
||||
@ -801,7 +801,7 @@ bool ZooKeeper::waitForDisappear(const std::string & path, const WaitCondition &
|
||||
|
||||
auto callback = [state](const Coordination::GetResponse & response)
|
||||
{
|
||||
state->code = int32_t(response.error);
|
||||
state->code = static_cast<int32_t>(response.error);
|
||||
if (state->code)
|
||||
state->event.set();
|
||||
};
|
||||
@ -810,7 +810,7 @@ bool ZooKeeper::waitForDisappear(const std::string & path, const WaitCondition &
|
||||
{
|
||||
if (!state->code)
|
||||
{
|
||||
state->code = int32_t(response.error);
|
||||
state->code = static_cast<int32_t>(response.error);
|
||||
if (!state->code)
|
||||
state->event_type = response.type;
|
||||
state->event.set();
|
||||
@ -828,7 +828,7 @@ bool ZooKeeper::waitForDisappear(const std::string & path, const WaitCondition &
|
||||
if (!state->event.tryWait(1000))
|
||||
continue;
|
||||
|
||||
if (state->code == int32_t(Coordination::Error::ZNONODE))
|
||||
if (state->code == static_cast<int32_t>(Coordination::Error::ZNONODE))
|
||||
return true;
|
||||
|
||||
if (state->code)
|
||||
|
@ -40,7 +40,7 @@ void write(bool x, WriteBuffer & out)
|
||||
|
||||
void write(const std::string & s, WriteBuffer & out)
|
||||
{
|
||||
write(int32_t(s.size()), out);
|
||||
write(static_cast<int32_t>(s.size()), out);
|
||||
out.write(s.data(), s.size());
|
||||
}
|
||||
|
||||
|
@ -539,7 +539,7 @@ void ZooKeeper::sendAuth(const String & scheme, const String & data)
|
||||
Error::ZMARSHALLINGERROR);
|
||||
|
||||
if (err != Error::ZOK)
|
||||
throw Exception("Error received in reply to auth request. Code: " + DB::toString(int32_t(err)) + ". Message: " + String(errorMessage(err)),
|
||||
throw Exception("Error received in reply to auth request. Code: " + DB::toString(static_cast<int32_t>(err)) + ". Message: " + String(errorMessage(err)),
|
||||
Error::ZMARSHALLINGERROR);
|
||||
}
|
||||
|
||||
@ -563,8 +563,8 @@ void ZooKeeper::sendThread()
|
||||
{
|
||||
/// Wait for the next request in queue. No more than operation timeout. No more than until next heartbeat time.
|
||||
UInt64 max_wait = std::min(
|
||||
UInt64(std::chrono::duration_cast<std::chrono::milliseconds>(next_heartbeat_time - now).count()),
|
||||
UInt64(operation_timeout.totalMilliseconds()));
|
||||
static_cast<UInt64>(std::chrono::duration_cast<std::chrono::milliseconds>(next_heartbeat_time - now).count()),
|
||||
static_cast<UInt64>(operation_timeout.totalMilliseconds()));
|
||||
|
||||
RequestInfo info;
|
||||
if (requests_queue.tryPop(info, max_wait))
|
||||
|
@ -153,7 +153,7 @@ void formatIPv6(const unsigned char * src, char *& dst, uint8_t zeroed_tail_byte
|
||||
}
|
||||
|
||||
/// Was it a trailing run of 0x00's?
|
||||
if (best.base != -1 && size_t(best.base) + size_t(best.len) == words.size())
|
||||
if (best.base != -1 && static_cast<size_t>(best.base) + static_cast<size_t>(best.len) == words.size())
|
||||
*dst++ = ':';
|
||||
|
||||
*dst++ = '\0';
|
||||
|
@ -143,7 +143,7 @@ void CompressionCodecDelta::doDecompressData(const char * source, UInt32 source_
|
||||
UInt8 bytes_to_skip = uncompressed_size % bytes_size;
|
||||
UInt32 output_size = uncompressed_size - bytes_to_skip;
|
||||
|
||||
if (UInt32(2 + bytes_to_skip) > source_size)
|
||||
if (static_cast<UInt32>(2 + bytes_to_skip) > source_size)
|
||||
throw Exception("Cannot decompress. File has wrong header", ErrorCodes::CANNOT_DECOMPRESS);
|
||||
|
||||
memcpy(dest, &source[2], bytes_to_skip);
|
||||
@ -186,7 +186,7 @@ UInt8 getDeltaBytesSize(const IDataType * column_type)
|
||||
|
||||
void registerCodecDelta(CompressionCodecFactory & factory)
|
||||
{
|
||||
UInt8 method_code = UInt8(CompressionMethodByte::Delta);
|
||||
UInt8 method_code = static_cast<UInt8>(CompressionMethodByte::Delta);
|
||||
factory.registerCompressionCodecWithType("Delta", method_code, [&](const ASTPtr & arguments, const IDataType * column_type) -> CompressionCodecPtr
|
||||
{
|
||||
UInt8 delta_bytes_size = 0;
|
||||
|
@ -520,7 +520,7 @@ void CompressionCodecDoubleDelta::doDecompressData(const char * source, UInt32 s
|
||||
UInt8 bytes_to_skip = uncompressed_size % bytes_size;
|
||||
UInt32 output_size = uncompressed_size - bytes_to_skip;
|
||||
|
||||
if (UInt32(2 + bytes_to_skip) > source_size)
|
||||
if (static_cast<UInt32>(2 + bytes_to_skip) > source_size)
|
||||
throw Exception("Cannot decompress. File has wrong header", ErrorCodes::CANNOT_DECOMPRESS);
|
||||
|
||||
memcpy(dest, &source[2], bytes_to_skip);
|
||||
@ -544,7 +544,7 @@ void CompressionCodecDoubleDelta::doDecompressData(const char * source, UInt32 s
|
||||
|
||||
void registerCodecDoubleDelta(CompressionCodecFactory & factory)
|
||||
{
|
||||
UInt8 method_code = UInt8(CompressionMethodByte::DoubleDelta);
|
||||
UInt8 method_code = static_cast<UInt8>(CompressionMethodByte::DoubleDelta);
|
||||
factory.registerCompressionCodecWithType("DoubleDelta", method_code,
|
||||
[&](const ASTPtr & arguments, const IDataType * column_type) -> CompressionCodecPtr
|
||||
{
|
||||
|
@ -50,11 +50,11 @@ uint8_t getMethodCode(EncryptionMethod Method)
|
||||
{
|
||||
if (Method == AES_128_GCM_SIV)
|
||||
{
|
||||
return uint8_t(CompressionMethodByte::AES_128_GCM_SIV);
|
||||
return static_cast<uint8_t>(CompressionMethodByte::AES_128_GCM_SIV);
|
||||
}
|
||||
else if (Method == AES_256_GCM_SIV)
|
||||
{
|
||||
return uint8_t(CompressionMethodByte::AES_256_GCM_SIV);
|
||||
return static_cast<uint8_t>(CompressionMethodByte::AES_256_GCM_SIV);
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -419,7 +419,7 @@ void CompressionCodecGorilla::doDecompressData(const char * source, UInt32 sourc
|
||||
|
||||
UInt8 bytes_to_skip = uncompressed_size % bytes_size;
|
||||
|
||||
if (UInt32(2 + bytes_to_skip) > source_size)
|
||||
if (static_cast<UInt32>(2 + bytes_to_skip) > source_size)
|
||||
throw Exception("Cannot decompress. File has wrong header", ErrorCodes::CANNOT_DECOMPRESS);
|
||||
|
||||
memcpy(dest, &source[2], bytes_to_skip);
|
||||
@ -443,7 +443,7 @@ void CompressionCodecGorilla::doDecompressData(const char * source, UInt32 sourc
|
||||
|
||||
void registerCodecGorilla(CompressionCodecFactory & factory)
|
||||
{
|
||||
UInt8 method_code = UInt8(CompressionMethodByte::Gorilla);
|
||||
UInt8 method_code = static_cast<UInt8>(CompressionMethodByte::Gorilla);
|
||||
factory.registerCompressionCodecWithType("Gorilla", method_code,
|
||||
[&](const ASTPtr & arguments, const IDataType * column_type) -> CompressionCodecPtr
|
||||
{
|
||||
|
@ -112,7 +112,7 @@ MagicNumber serializeTypeId(TypeIndex type_id)
|
||||
break;
|
||||
}
|
||||
|
||||
throw Exception("Type is not supported by T64 codec: " + toString(UInt32(type_id)), ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception("Type is not supported by T64 codec: " + toString(static_cast<UInt32>(type_id)), ErrorCodes::LOGICAL_ERROR);
|
||||
}
|
||||
|
||||
TypeIndex deserializeTypeId(uint8_t serialized_type_id)
|
||||
@ -137,7 +137,7 @@ TypeIndex deserializeTypeId(uint8_t serialized_type_id)
|
||||
case MagicNumber::Decimal64: return TypeIndex::Decimal64;
|
||||
}
|
||||
|
||||
throw Exception("Bad magic number in T64 codec: " + toString(UInt32(serialized_type_id)), ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception("Bad magic number in T64 codec: " + toString(static_cast<UInt32>(serialized_type_id)), ErrorCodes::LOGICAL_ERROR);
|
||||
}
|
||||
|
||||
|
||||
@ -284,22 +284,22 @@ void reverseTransposeBytes(const UInt64 * matrix, UInt32 col, T & value)
|
||||
|
||||
if constexpr (sizeof(T) > 4)
|
||||
{
|
||||
value |= UInt64(matrix8[64 * 7 + col]) << (8 * 7);
|
||||
value |= UInt64(matrix8[64 * 6 + col]) << (8 * 6);
|
||||
value |= UInt64(matrix8[64 * 5 + col]) << (8 * 5);
|
||||
value |= UInt64(matrix8[64 * 4 + col]) << (8 * 4);
|
||||
value |= static_cast<UInt64>(matrix8[64 * 7 + col]) << (8 * 7);
|
||||
value |= static_cast<UInt64>(matrix8[64 * 6 + col]) << (8 * 6);
|
||||
value |= static_cast<UInt64>(matrix8[64 * 5 + col]) << (8 * 5);
|
||||
value |= static_cast<UInt64>(matrix8[64 * 4 + col]) << (8 * 4);
|
||||
}
|
||||
|
||||
if constexpr (sizeof(T) > 2)
|
||||
{
|
||||
value |= UInt32(matrix8[64 * 3 + col]) << (8 * 3);
|
||||
value |= UInt32(matrix8[64 * 2 + col]) << (8 * 2);
|
||||
value |= static_cast<UInt32>(matrix8[64 * 3 + col]) << (8 * 3);
|
||||
value |= static_cast<UInt32>(matrix8[64 * 2 + col]) << (8 * 2);
|
||||
}
|
||||
|
||||
if constexpr (sizeof(T) > 1)
|
||||
value |= UInt32(matrix8[64 * 1 + col]) << (8 * 1);
|
||||
value |= static_cast<UInt32>(matrix8[64 * 1 + col]) << (8 * 1);
|
||||
|
||||
value |= UInt32(matrix8[col]);
|
||||
value |= static_cast<UInt32>(matrix8[col]);
|
||||
}
|
||||
|
||||
|
||||
@ -422,12 +422,12 @@ UInt32 getValuableBitsNumber(Int64 min, Int64 max)
|
||||
if (min < 0 && max >= 0)
|
||||
{
|
||||
if (min + max >= 0)
|
||||
return getValuableBitsNumber(0ull, UInt64(max)) + 1;
|
||||
return getValuableBitsNumber(0ull, static_cast<UInt64>(max)) + 1;
|
||||
else
|
||||
return getValuableBitsNumber(0ull, UInt64(~min)) + 1;
|
||||
return getValuableBitsNumber(0ull, static_cast<UInt64>(~min)) + 1;
|
||||
}
|
||||
else
|
||||
return getValuableBitsNumber(UInt64(min), UInt64(max));
|
||||
return getValuableBitsNumber(static_cast<UInt64>(min), static_cast<UInt64>(max));
|
||||
}
|
||||
|
||||
|
||||
@ -559,14 +559,14 @@ void decompressData(const char * src, UInt32 bytes_size, char * dst, UInt32 unco
|
||||
T upper_max [[maybe_unused]] = 0;
|
||||
T sign_bit [[maybe_unused]] = 0;
|
||||
if (num_bits < 64)
|
||||
upper_min = UInt64(min) >> num_bits << num_bits;
|
||||
upper_min = static_cast<UInt64>(min) >> num_bits << num_bits;
|
||||
|
||||
if constexpr (is_signed_v<T>)
|
||||
{
|
||||
if (min < 0 && max >= 0 && num_bits < 64)
|
||||
{
|
||||
sign_bit = 1ull << (num_bits - 1);
|
||||
upper_max = UInt64(max) >> num_bits << num_bits;
|
||||
upper_max = static_cast<UInt64>(max) >> num_bits << num_bits;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -109,7 +109,7 @@ CompressionCodecZSTD::CompressionCodecZSTD(int level_) : level(level_), enable_l
|
||||
|
||||
void registerCodecZSTD(CompressionCodecFactory & factory)
|
||||
{
|
||||
UInt8 method_code = UInt8(CompressionMethodByte::ZSTD);
|
||||
UInt8 method_code = static_cast<UInt8>(CompressionMethodByte::ZSTD);
|
||||
factory.registerCompressionCodec("ZSTD", method_code, [&](const ASTPtr & arguments) -> CompressionCodecPtr {
|
||||
int level = CompressionCodecZSTD::ZSTD_DEFAULT_LEVEL;
|
||||
if (arguments && !arguments->children.empty())
|
||||
|
@ -628,12 +628,12 @@ void StreamStatistics::print() const
|
||||
{
|
||||
std::cerr
|
||||
<< "Num tokens: " << num_tokens
|
||||
<< ", Avg literal length: " << double(sum_literal_lengths) / num_tokens
|
||||
<< ", Avg match length: " << double(sum_match_lengths) / num_tokens
|
||||
<< ", Avg match offset: " << double(sum_match_offsets) / num_tokens
|
||||
<< ", Offset < 8 ratio: " << double(count_match_offset_less_8) / num_tokens
|
||||
<< ", Offset < 16 ratio: " << double(count_match_offset_less_16) / num_tokens
|
||||
<< ", Match replicate itself: " << double(count_match_replicate_itself) / num_tokens
|
||||
<< ", Avg literal length: " << static_cast<double>(sum_literal_lengths) / num_tokens
|
||||
<< ", Avg match length: " << static_cast<double>(sum_match_lengths) / num_tokens
|
||||
<< ", Avg match offset: " << static_cast<double>(sum_match_offsets) / num_tokens
|
||||
<< ", Offset < 8 ratio: " << static_cast<double>(count_match_offset_less_8) / num_tokens
|
||||
<< ", Offset < 16 ratio: " << static_cast<double>(count_match_offset_less_16) / num_tokens
|
||||
<< ", Match replicate itself: " << static_cast<double>(count_match_replicate_itself) / num_tokens
|
||||
<< "\n";
|
||||
}
|
||||
|
||||
|
@ -99,20 +99,20 @@ void KeeperConfigurationAndSettings::dump(WriteBufferFromOwnString & buf) const
|
||||
writeText("max_requests_batch_size=", buf);
|
||||
write_int(coordination_settings->max_requests_batch_size);
|
||||
writeText("min_session_timeout_ms=", buf);
|
||||
write_int(uint64_t(coordination_settings->min_session_timeout_ms));
|
||||
write_int(static_cast<uint64_t>(coordination_settings->min_session_timeout_ms));
|
||||
writeText("session_timeout_ms=", buf);
|
||||
write_int(uint64_t(coordination_settings->session_timeout_ms));
|
||||
write_int(static_cast<uint64_t>(coordination_settings->session_timeout_ms));
|
||||
writeText("operation_timeout_ms=", buf);
|
||||
write_int(uint64_t(coordination_settings->operation_timeout_ms));
|
||||
write_int(static_cast<uint64_t>(coordination_settings->operation_timeout_ms));
|
||||
writeText("dead_session_check_period_ms=", buf);
|
||||
write_int(uint64_t(coordination_settings->dead_session_check_period_ms));
|
||||
write_int(static_cast<uint64_t>(coordination_settings->dead_session_check_period_ms));
|
||||
|
||||
writeText("heart_beat_interval_ms=", buf);
|
||||
write_int(uint64_t(coordination_settings->heart_beat_interval_ms));
|
||||
write_int(static_cast<uint64_t>(coordination_settings->heart_beat_interval_ms));
|
||||
writeText("election_timeout_lower_bound_ms=", buf);
|
||||
write_int(uint64_t(coordination_settings->election_timeout_lower_bound_ms));
|
||||
write_int(static_cast<uint64_t>(coordination_settings->election_timeout_lower_bound_ms));
|
||||
writeText("election_timeout_upper_bound_ms=", buf);
|
||||
write_int(uint64_t(coordination_settings->election_timeout_upper_bound_ms));
|
||||
write_int(static_cast<uint64_t>(coordination_settings->election_timeout_upper_bound_ms));
|
||||
|
||||
writeText("reserved_log_items=", buf);
|
||||
write_int(coordination_settings->reserved_log_items);
|
||||
@ -122,9 +122,9 @@ void KeeperConfigurationAndSettings::dump(WriteBufferFromOwnString & buf) const
|
||||
writeText("auto_forwarding=", buf);
|
||||
write_bool(coordination_settings->auto_forwarding);
|
||||
writeText("shutdown_timeout=", buf);
|
||||
write_int(uint64_t(coordination_settings->shutdown_timeout));
|
||||
write_int(static_cast<uint64_t>(coordination_settings->shutdown_timeout));
|
||||
writeText("startup_timeout=", buf);
|
||||
write_int(uint64_t(coordination_settings->startup_timeout));
|
||||
write_int(static_cast<uint64_t>(coordination_settings->startup_timeout));
|
||||
|
||||
writeText("raft_logs_level=", buf);
|
||||
writeText(coordination_settings->raft_logs_level.toString(), buf);
|
||||
|
@ -221,7 +221,7 @@ namespace MySQLReplication
|
||||
case MYSQL_TYPE_BLOB:
|
||||
case MYSQL_TYPE_GEOMETRY:
|
||||
{
|
||||
column_meta.emplace_back(UInt16(meta[pos]));
|
||||
column_meta.emplace_back(static_cast<UInt16>(meta[pos]));
|
||||
pos += 1;
|
||||
break;
|
||||
}
|
||||
@ -229,9 +229,9 @@ namespace MySQLReplication
|
||||
case MYSQL_TYPE_STRING:
|
||||
{
|
||||
/// Big-Endian
|
||||
auto b0 = UInt16(meta[pos] << 8);
|
||||
auto b1 = UInt8(meta[pos + 1]);
|
||||
column_meta.emplace_back(UInt16(b0 + b1));
|
||||
auto b0 = static_cast<UInt16>(meta[pos] << 8);
|
||||
auto b1 = static_cast<UInt8>(meta[pos + 1]);
|
||||
column_meta.emplace_back(static_cast<UInt16>(b0 + b1));
|
||||
pos += 2;
|
||||
break;
|
||||
}
|
||||
@ -239,9 +239,9 @@ namespace MySQLReplication
|
||||
case MYSQL_TYPE_VARCHAR:
|
||||
case MYSQL_TYPE_VAR_STRING: {
|
||||
/// Little-Endian
|
||||
auto b0 = UInt8(meta[pos]);
|
||||
auto b1 = UInt16(meta[pos + 1] << 8);
|
||||
column_meta.emplace_back(UInt16(b0 + b1));
|
||||
auto b0 = static_cast<UInt8>(meta[pos]);
|
||||
auto b1 = static_cast<UInt16>(meta[pos + 1] << 8);
|
||||
column_meta.emplace_back(static_cast<UInt16>(b0 + b1));
|
||||
pos += 2;
|
||||
break;
|
||||
}
|
||||
@ -543,7 +543,7 @@ namespace MySQLReplication
|
||||
);
|
||||
|
||||
if (!meta)
|
||||
row.push_back(Field{UInt32(date_time)});
|
||||
row.push_back(Field{static_cast<UInt32>(date_time)});
|
||||
else
|
||||
{
|
||||
DB::DecimalUtils::DecimalComponents<DateTime64> components{
|
||||
@ -603,7 +603,7 @@ namespace MySQLReplication
|
||||
throw Exception("Attempt to read after EOF.", ErrorCodes::ATTEMPT_TO_READ_AFTER_EOF);
|
||||
|
||||
if ((*payload.position() & 0x80) == 0)
|
||||
mask = UInt32(-1);
|
||||
mask = static_cast<UInt32>(-1);
|
||||
|
||||
*payload.position() ^= 0x80;
|
||||
|
||||
|
@ -519,7 +519,7 @@ class IColumn;
|
||||
M(Bool, database_replicated_always_detach_permanently, false, "Execute DETACH TABLE as DETACH TABLE PERMANENTLY if database engine is Replicated", 0) \
|
||||
M(Bool, database_replicated_allow_only_replicated_engine, false, "Allow to create only Replicated tables in database with engine Replicated", 0) \
|
||||
M(DistributedDDLOutputMode, distributed_ddl_output_mode, DistributedDDLOutputMode::THROW, "Format of distributed DDL query result", 0) \
|
||||
M(UInt64, distributed_ddl_entry_format_version, 1, "Version of DDL entry to write into ZooKeeper", 0) \
|
||||
M(UInt64, distributed_ddl_entry_format_version, 2, "Version of DDL entry to write into ZooKeeper", 0) \
|
||||
\
|
||||
M(UInt64, external_storage_max_read_rows, 0, "Limit maximum number of rows when table with external engine should flush history data. Now supported only for MySQL table engine, database engine, dictionary and MaterializedMySQL. If equal to 0, this setting is disabled", 0) \
|
||||
M(UInt64, external_storage_max_read_bytes, 0, "Limit maximum number of bytes when table with external engine should flush history data. Now supported only for MySQL table engine, database engine, dictionary and MaterializedMySQL. If equal to 0, this setting is disabled", 0) \
|
||||
|
@@ -142,23 +142,30 @@ void SerializationMap::deserializeTextImpl(IColumn & column, ReadBuffer & istr,
break;

reader(istr, key, key_column);
++size;

skipWhitespaceIfAny(istr);
assertChar(':', istr);

++size;
skipWhitespaceIfAny(istr);

reader(istr, value, value_column);

skipWhitespaceIfAny(istr);
}

offsets.push_back(offsets.back() + size);
assertChar('}', istr);
}
catch (...)
{
if (size)
{
nested_tuple.getColumnPtr(0) = key_column.cut(0, offsets.back());
nested_tuple.getColumnPtr(1) = value_column.cut(0, offsets.back());
}
throw;
}

offsets.push_back(offsets.back() + size);
}

void SerializationMap::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
|
@ -301,7 +301,7 @@ ColumnUInt8::Ptr IPAddressDictionary::hasKeys(const Columns & key_columns, const
|
||||
uint8_t addrv6_buf[IPV6_BINARY_LENGTH];
|
||||
for (const auto i : collections::range(0, rows))
|
||||
{
|
||||
auto addrv4 = UInt32(first_column->get64(i));
|
||||
auto addrv4 = static_cast<UInt32>(first_column->get64(i));
|
||||
auto found = tryLookupIPv4(addrv4, addrv6_buf);
|
||||
out[i] = (found != ipNotFound());
|
||||
keys_found += out[i];
|
||||
@ -387,7 +387,7 @@ void IPAddressDictionary::loadData()
|
||||
setAttributeValue(attribute, attribute_column[row]);
|
||||
}
|
||||
|
||||
const auto [addr, prefix] = parseIPFromString(std::string_view(key_column_ptr->getDataAt(row)));
|
||||
const auto [addr, prefix] = parseIPFromString(std::string_view{key_column_ptr->getDataAt(row)});
|
||||
has_ipv6 = has_ipv6 || (addr.family() == Poco::Net::IPAddress::IPv6);
|
||||
|
||||
size_t row_number = ip_records.size();
|
||||
@ -716,7 +716,7 @@ void IPAddressDictionary::getItemsImpl(
|
||||
for (const auto i : collections::range(0, rows))
|
||||
{
|
||||
// addrv4 has native endianness
|
||||
auto addrv4 = UInt32(first_column->get64(i));
|
||||
auto addrv4 = static_cast<UInt32>(first_column->get64(i));
|
||||
auto found = tryLookupIPv4(addrv4, addrv6_buf);
|
||||
if (found != ipNotFound())
|
||||
{
|
||||
|
@ -181,7 +181,7 @@ Pipe MongoDBDictionarySource::loadIds(const std::vector<UInt64> & ids)
|
||||
|
||||
Poco::MongoDB::Array::Ptr ids_array(new Poco::MongoDB::Array);
|
||||
for (const UInt64 id : ids)
|
||||
ids_array->add(DB::toString(id), Int32(id));
|
||||
ids_array->add(DB::toString(id), static_cast<Int32>(id));
|
||||
|
||||
cursor->query().selector().addNewDocument(dict_struct.id->name).add("$in", ids_array);
|
||||
|
||||
@ -218,7 +218,7 @@ Pipe MongoDBDictionarySource::loadKeys(const Columns & key_columns, const std::v
|
||||
case AttributeUnderlyingType::Int32:
|
||||
case AttributeUnderlyingType::Int64:
|
||||
{
|
||||
key.add(key_attribute.name, Int32(key_columns[attribute_index]->get64(row_idx)));
|
||||
key.add(key_attribute.name, static_cast<Int32>(key_columns[attribute_index]->get64(row_idx)));
|
||||
break;
|
||||
}
|
||||
case AttributeUnderlyingType::Float32:
|
||||
|
@ -93,7 +93,7 @@ private:
|
||||
if (!first)
|
||||
writeChar(',', out);
|
||||
first = false;
|
||||
writeIntText(T(bit), out);
|
||||
writeIntText(static_cast<T>(bit), out);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -651,18 +651,18 @@ struct ParseMACImpl
|
||||
*/
|
||||
static UInt64 parse(const char * pos)
|
||||
{
|
||||
return (UInt64(unhex(pos[0])) << 44)
|
||||
| (UInt64(unhex(pos[1])) << 40)
|
||||
| (UInt64(unhex(pos[3])) << 36)
|
||||
| (UInt64(unhex(pos[4])) << 32)
|
||||
| (UInt64(unhex(pos[6])) << 28)
|
||||
| (UInt64(unhex(pos[7])) << 24)
|
||||
| (UInt64(unhex(pos[9])) << 20)
|
||||
| (UInt64(unhex(pos[10])) << 16)
|
||||
| (UInt64(unhex(pos[12])) << 12)
|
||||
| (UInt64(unhex(pos[13])) << 8)
|
||||
| (UInt64(unhex(pos[15])) << 4)
|
||||
| (UInt64(unhex(pos[16])));
|
||||
return (static_cast<UInt64>(unhex(pos[0])) << 44)
|
||||
| (static_cast<UInt64>(unhex(pos[1])) << 40)
|
||||
| (static_cast<UInt64>(unhex(pos[3])) << 36)
|
||||
| (static_cast<UInt64>(unhex(pos[4])) << 32)
|
||||
| (static_cast<UInt64>(unhex(pos[6])) << 28)
|
||||
| (static_cast<UInt64>(unhex(pos[7])) << 24)
|
||||
| (static_cast<UInt64>(unhex(pos[9])) << 20)
|
||||
| (static_cast<UInt64>(unhex(pos[10])) << 16)
|
||||
| (static_cast<UInt64>(unhex(pos[12])) << 12)
|
||||
| (static_cast<UInt64>(unhex(pos[13])) << 8)
|
||||
| (static_cast<UInt64>(unhex(pos[15])) << 4)
|
||||
| (static_cast<UInt64>(unhex(pos[16])));
|
||||
}
|
||||
|
||||
static constexpr auto name = "MACStringToNum";
|
||||
@ -678,12 +678,12 @@ struct ParseOUIImpl
|
||||
*/
|
||||
static UInt64 parse(const char * pos)
|
||||
{
|
||||
return (UInt64(unhex(pos[0])) << 20)
|
||||
| (UInt64(unhex(pos[1])) << 16)
|
||||
| (UInt64(unhex(pos[3])) << 12)
|
||||
| (UInt64(unhex(pos[4])) << 8)
|
||||
| (UInt64(unhex(pos[6])) << 4)
|
||||
| (UInt64(unhex(pos[7])));
|
||||
return (static_cast<UInt64>(unhex(pos[0])) << 20)
|
||||
| (static_cast<UInt64>(unhex(pos[1])) << 16)
|
||||
| (static_cast<UInt64>(unhex(pos[3])) << 12)
|
||||
| (static_cast<UInt64>(unhex(pos[4])) << 8)
|
||||
| (static_cast<UInt64>(unhex(pos[6])) << 4)
|
||||
| (static_cast<UInt64>(unhex(pos[7])));
|
||||
}
|
||||
|
||||
static constexpr auto name = "MACStringToOUI";
|
||||
@ -895,9 +895,9 @@ private:
|
||||
if (bits_to_keep >= 8 * sizeof(UInt32))
|
||||
return { src, src };
|
||||
if (bits_to_keep == 0)
|
||||
return { UInt32(0), UInt32(-1) };
|
||||
return { static_cast<UInt32>(0), static_cast<UInt32>(-1) };
|
||||
|
||||
UInt32 mask = UInt32(-1) << (8 * sizeof(UInt32) - bits_to_keep);
|
||||
UInt32 mask = static_cast<UInt32>(-1) << (8 * sizeof(UInt32) - bits_to_keep);
|
||||
UInt32 lower = src & mask;
|
||||
UInt32 upper = lower | ~mask;
|
||||
|
||||
|
@ -565,7 +565,7 @@ ColumnPtr FunctionAnyArityLogical<Impl, Name>::executeShortCircuit(ColumnsWithTy
|
||||
/// The result is !mask_n.
|
||||
|
||||
bool inverted = Name::name != NameAnd::name;
|
||||
UInt8 null_value = UInt8(Name::name == NameAnd::name);
|
||||
UInt8 null_value = static_cast<UInt8>(Name::name == NameAnd::name);
|
||||
IColumn::Filter mask(arguments[0].column->size(), 1);
|
||||
|
||||
/// If result is nullable, we need to create null bytemap of the resulting column.
|
||||
|
@ -271,9 +271,9 @@ struct NgramDistanceImpl
|
||||
size_t first_size = dispatchSearcher(calculateHaystackStatsAndMetric<false>, data.data(), data_size, common_stats.get(), distance, nullptr);
|
||||
/// For !symmetric version we should not use first_size.
|
||||
if constexpr (symmetric)
|
||||
res = distance * 1.f / std::max(first_size + second_size, size_t(1));
|
||||
res = distance * 1.f / std::max(first_size + second_size, static_cast<size_t>(1));
|
||||
else
|
||||
res = 1.f - distance * 1.f / std::max(second_size, size_t(1));
|
||||
res = 1.f - distance * 1.f / std::max(second_size, static_cast<size_t>(1));
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -339,9 +339,9 @@ struct NgramDistanceImpl
|
||||
|
||||
/// For !symmetric version we should not use haystack_stats_size.
|
||||
if constexpr (symmetric)
|
||||
res[i] = distance * 1.f / std::max(haystack_stats_size + needle_stats_size, size_t(1));
|
||||
res[i] = distance * 1.f / std::max(haystack_stats_size + needle_stats_size, static_cast<size_t>(1));
|
||||
else
|
||||
res[i] = 1.f - distance * 1.f / std::max(needle_stats_size, size_t(1));
|
||||
res[i] = 1.f - distance * 1.f / std::max(needle_stats_size, static_cast<size_t>(1));
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -410,7 +410,7 @@ struct NgramDistanceImpl
|
||||
for (size_t j = 0; j < needle_stats_size; ++j)
|
||||
--common_stats[needle_ngram_storage[j]];
|
||||
|
||||
res[i] = 1.f - distance * 1.f / std::max(needle_stats_size, size_t(1));
|
||||
res[i] = 1.f - distance * 1.f / std::max(needle_stats_size, static_cast<size_t>(1));
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -457,9 +457,9 @@ struct NgramDistanceImpl
|
||||
ngram_storage.get());
|
||||
/// For !symmetric version we should not use haystack_stats_size.
|
||||
if constexpr (symmetric)
|
||||
res[i] = distance * 1.f / std::max(haystack_stats_size + needle_stats_size, size_t(1));
|
||||
res[i] = distance * 1.f / std::max(haystack_stats_size + needle_stats_size, static_cast<size_t>(1));
|
||||
else
|
||||
res[i] = 1.f - distance * 1.f / std::max(needle_stats_size, size_t(1));
|
||||
res[i] = 1.f - distance * 1.f / std::max(needle_stats_size, static_cast<size_t>(1));
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -23,7 +23,7 @@ public:
|
||||
if (txn)
|
||||
res = {txn->tid.start_csn, txn->tid.local_tid, txn->tid.host_id};
|
||||
else
|
||||
res = {UInt64(0), UInt64(0), UUIDHelpers::Nil};
|
||||
res = {static_cast<UInt64>(0), static_cast<UInt64>(0), UUIDHelpers::Nil};
|
||||
return res;
|
||||
}
|
||||
|
||||
|
@ -306,7 +306,7 @@ GeohashesInBoxPreparedArgs geohashesInBoxPrepare(
|
||||
|
||||
return GeohashesInBoxPreparedArgs
|
||||
{
|
||||
std::max<UInt64>(1, UInt64(lon_items) * lat_items),
|
||||
std::max<UInt64>(1, static_cast<UInt64>(lon_items) * lat_items),
|
||||
lon_items,
|
||||
lat_items,
|
||||
lon_min,
|
||||
|
@ -113,7 +113,7 @@ private:
|
||||
return default_port;
|
||||
|
||||
port = (port * 10) + (*p - '0');
|
||||
if (port < 0 || port > UInt16(-1))
|
||||
if (port < 0 || port > static_cast<UInt16>(-1))
|
||||
return default_port;
|
||||
++p;
|
||||
}
|
||||
|
@ -94,7 +94,7 @@ private:
|
||||
src_offset = src_offsets[i];
|
||||
dst_offset += src_length;
|
||||
|
||||
if (src_length > 1 && dst_data[dst_offset - 2] != UInt8(trailing_char_str.front()))
|
||||
if (src_length > 1 && dst_data[dst_offset - 2] != static_cast<UInt8>(trailing_char_str.front()))
|
||||
{
|
||||
dst_data[dst_offset - 1] = trailing_char_str.front();
|
||||
dst_data[dst_offset] = 0;
|
||||
|
@ -132,7 +132,7 @@ public:
|
||||
if (count_positive == 0 || count_positive == size)
|
||||
return std::numeric_limits<ResultType>::quiet_NaN();
|
||||
|
||||
return ResultType(area) / count_positive / (size - count_positive);
|
||||
return static_cast<ResultType>(area) / count_positive / (size - count_positive);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -183,7 +183,7 @@ struct ArrayAggregateImpl
|
||||
{
|
||||
size_t array_size = offsets[i] - pos;
|
||||
/// Just multiply the value by array size.
|
||||
res[i] = x * ResultType(array_size);
|
||||
res[i] = x * static_cast<ResultType>(array_size);
|
||||
}
|
||||
else if constexpr (aggregate_operation == AggregateOperation::min ||
|
||||
aggregate_operation == AggregateOperation::max)
|
||||
|
@ -152,7 +152,7 @@ public:
|
||||
|
||||
void update()
|
||||
{
|
||||
sink_null_map[index] = bool(src_null_map);
|
||||
sink_null_map[index] = static_cast<bool>(src_null_map);
|
||||
++index;
|
||||
}
|
||||
|
||||
@ -492,7 +492,7 @@ ColumnPtr FunctionArrayElement::executeNumberConst(
|
||||
/// arr[-2] is the element at offset 1 from the last and so on.
|
||||
|
||||
ArrayElementNumImpl<DataType>::template vectorConst<true>(
|
||||
col_nested->getData(), col_array->getOffsets(), -(UInt64(safeGet<Int64>(index)) + 1), col_res->getData(), builder);
|
||||
col_nested->getData(), col_array->getOffsets(), -(static_cast<UInt64>(safeGet<Int64>(index)) + 1), col_res->getData(), builder);
|
||||
}
|
||||
else
|
||||
throw Exception("Illegal type of array index", ErrorCodes::LOGICAL_ERROR);
|
||||
@ -605,7 +605,7 @@ ColumnPtr FunctionArrayElement::executeGenericConst(
|
||||
col_nested, col_array->getOffsets(), safeGet<UInt64>(index) - 1, *col_res, builder);
|
||||
else if (index.getType() == Field::Types::Int64)
|
||||
ArrayElementGenericImpl::vectorConst<true>(
|
||||
col_nested, col_array->getOffsets(), -(UInt64(safeGet<Int64>(index) + 1)), *col_res, builder);
|
||||
col_nested, col_array->getOffsets(), -(static_cast<UInt64>(safeGet<Int64>(index) + 1)), *col_res, builder);
|
||||
else
|
||||
throw Exception("Illegal type of array index", ErrorCodes::LOGICAL_ERROR);
|
||||
|
||||
|
@ -112,7 +112,7 @@ bool FunctionArrayReverse::executeGeneric(const IColumn & src_data, const Column
|
||||
{
|
||||
ssize_t src_index = src_array_offsets[i] - 1;
|
||||
|
||||
while (src_index >= ssize_t(src_prev_offset))
|
||||
while (src_index >= static_cast<ssize_t>(src_prev_offset))
|
||||
{
|
||||
res_data.insertFrom(src_data, src_index);
|
||||
--src_index;
|
||||
|
@ -72,7 +72,7 @@ struct BitShiftLeftImpl
|
||||
if (shift_left_bits)
|
||||
{
|
||||
/// The left b bit of the right byte is moved to the right b bit of this byte
|
||||
*out = UInt8(UInt8(*(op_pointer) >> (8 - shift_left_bits)) | previous);
|
||||
*out = static_cast<UInt8>(static_cast<UInt8>(*(op_pointer) >> (8 - shift_left_bits)) | previous);
|
||||
previous = *op_pointer << shift_left_bits;
|
||||
}
|
||||
else
|
||||
@ -131,7 +131,7 @@ struct BitShiftLeftImpl
|
||||
if (op_pointer + 1 < end)
|
||||
{
|
||||
/// The left b bit of the right byte is moved to the right b bit of this byte
|
||||
*out = UInt8(UInt8(*(op_pointer + 1) >> (8 - shift_left_bits)) | *out);
|
||||
*out = static_cast<UInt8>(static_cast<UInt8>(*(op_pointer + 1) >> (8 - shift_left_bits)) | *out);
|
||||
}
|
||||
op_pointer++;
|
||||
out++;
|
||||
|
@ -41,7 +41,7 @@ struct BitShiftRightImpl
|
||||
if (op_pointer - 1 >= begin)
|
||||
{
|
||||
/// The right b bit of the left byte is moved to the left b bit of this byte
|
||||
*out = UInt8(UInt8(*(op_pointer - 1) << (8 - shift_right_bits)) | *out);
|
||||
*out = static_cast<UInt8>(static_cast<UInt8>(*(op_pointer - 1) << (8 - shift_right_bits)) | *out);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -235,8 +235,8 @@ private:
|
||||
template <typename TransformX, typename TransformY, typename T1, typename T2>
|
||||
Int64 calculate(const TransformX & transform_x, const TransformY & transform_y, T1 x, T2 y, const DateLUTImpl & timezone_x, const DateLUTImpl & timezone_y) const
|
||||
{
|
||||
return Int64(transform_y.execute(y, timezone_y))
|
||||
- Int64(transform_x.execute(x, timezone_x));
|
||||
return static_cast<Int64>(transform_y.execute(y, timezone_y))
|
||||
- static_cast<Int64>(transform_x.execute(x, timezone_x));
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
|
@ -150,7 +150,7 @@ ColumnPtr FunctionHasColumnInTable::executeImpl(const ColumnsWithTypeAndName & a
|
||||
has_column = remote_columns.hasPhysical(column_name);
|
||||
}
|
||||
|
||||
return DataTypeUInt8().createColumnConst(input_rows_count, Field{UInt64(has_column)});
|
||||
return DataTypeUInt8().createColumnConst(input_rows_count, Field{static_cast<UInt64>(has_column)});
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -15,7 +15,7 @@ inline int32_t JumpConsistentHash(uint64_t key, int32_t num_buckets)
|
||||
{
|
||||
b = j;
|
||||
key = key * 2862933555777941757ULL + 1;
|
||||
j = static_cast<int64_t>((b + 1) * (double(1LL << 31) / double((key >> 33) + 1)));
|
||||
j = static_cast<int64_t>((b + 1) * (static_cast<double>(1LL << 31) / static_cast<double>((key >> 33) + 1)));
|
||||
}
|
||||
return static_cast<int32_t>(b);
|
||||
}
|
||||
|
@ -135,7 +135,7 @@ public:
|
||||
}
|
||||
if (size <= 0)
|
||||
return;
|
||||
if (size > Int64(input_rows_count))
|
||||
if (size > static_cast<Int64>(input_rows_count))
|
||||
size = input_rows_count;
|
||||
|
||||
if (!src)
|
||||
@ -163,14 +163,14 @@ public:
|
||||
}
|
||||
else if (offset > 0)
|
||||
{
|
||||
insert_range_from(source_is_constant, source_column_casted, offset, Int64(input_rows_count) - offset);
|
||||
insert_range_from(default_is_constant, default_column_casted, Int64(input_rows_count) - offset, offset);
|
||||
insert_range_from(source_is_constant, source_column_casted, offset, static_cast<Int64>(input_rows_count) - offset);
|
||||
insert_range_from(default_is_constant, default_column_casted, static_cast<Int64>(input_rows_count) - offset, offset);
|
||||
return result_column;
|
||||
}
|
||||
else
|
||||
{
|
||||
insert_range_from(default_is_constant, default_column_casted, 0, -offset);
|
||||
insert_range_from(source_is_constant, source_column_casted, 0, Int64(input_rows_count) + offset);
|
||||
insert_range_from(source_is_constant, source_column_casted, 0, static_cast<Int64>(input_rows_count) + offset);
|
||||
return result_column;
|
||||
}
|
||||
}
|
||||
@ -188,7 +188,7 @@ public:
|
||||
|
||||
Int64 src_idx = row + offset;
|
||||
|
||||
if (src_idx >= 0 && src_idx < Int64(input_rows_count))
|
||||
if (src_idx >= 0 && src_idx < static_cast<Int64>(input_rows_count))
|
||||
result_column->insertFrom(*source_column_casted, source_is_constant ? 0 : src_idx);
|
||||
else if (has_defaults)
|
||||
result_column->insertFrom(*default_column_casted, default_is_constant ? 0 : row);
|
||||
|
@ -30,7 +30,7 @@ using FunctionSigmoid = FunctionMathUnary<Impl>;
|
||||
|
||||
#else
|
||||
|
||||
static double sigmoid(double x)
|
||||
double sigmoid(double x)
|
||||
{
|
||||
return 1.0 / (1.0 + exp(-x));
|
||||
}
|
||||
|
@ -26,7 +26,7 @@ inline bool HadoopSnappyDecoder::checkBufferLength(int max) const
|
||||
|
||||
inline bool HadoopSnappyDecoder::checkAvailIn(size_t avail_in, int min)
|
||||
{
|
||||
return avail_in >= size_t(min);
|
||||
return avail_in >= static_cast<size_t>(min);
|
||||
}
|
||||
|
||||
inline void HadoopSnappyDecoder::copyToBuffer(size_t * avail_in, const char ** next_in)
|
||||
|
@ -245,7 +245,7 @@ void PeekableReadBuffer::resizeOwnMemoryIfNecessary(size_t bytes_to_append)
|
||||
|
||||
/// Stack memory is not enough, allocate larger buffer.
|
||||
use_stack_memory = false;
|
||||
memory.resize(std::max(size_t(DBMS_DEFAULT_BUFFER_SIZE), new_size));
|
||||
memory.resize(std::max(static_cast<size_t>(DBMS_DEFAULT_BUFFER_SIZE), new_size));
|
||||
memcpy(memory.data(), stack_memory, sizeof(stack_memory));
|
||||
if (need_update_checkpoint)
|
||||
checkpoint.emplace(memory.data() + offset);
|
||||
|
@ -16,13 +16,13 @@ off_t ReadBufferFromMemory::seek(off_t offset, int whence)
|
||||
{
|
||||
pos = internal_buffer.begin() + offset;
|
||||
working_buffer = internal_buffer; /// We need to restore `working_buffer` in case the position was at EOF before this seek().
|
||||
return size_t(pos - internal_buffer.begin());
|
||||
return static_cast<size_t>(pos - internal_buffer.begin());
|
||||
}
|
||||
else
|
||||
throw Exception(
|
||||
"Seek position is out of bounds. "
|
||||
"Offset: "
|
||||
+ std::to_string(offset) + ", Max: " + std::to_string(size_t(internal_buffer.end() - internal_buffer.begin())),
|
||||
+ std::to_string(offset) + ", Max: " + std::to_string(static_cast<size_t>(internal_buffer.end() - internal_buffer.begin())),
|
||||
ErrorCodes::SEEK_POSITION_OUT_OF_BOUND);
|
||||
}
|
||||
else if (whence == SEEK_CUR)
|
||||
@ -32,13 +32,13 @@ off_t ReadBufferFromMemory::seek(off_t offset, int whence)
|
||||
{
|
||||
pos = new_pos;
|
||||
working_buffer = internal_buffer; /// We need to restore `working_buffer` in case the position was at EOF before this seek().
|
||||
return size_t(pos - internal_buffer.begin());
|
||||
return static_cast<size_t>(pos - internal_buffer.begin());
|
||||
}
|
||||
else
|
||||
throw Exception(
|
||||
"Seek position is out of bounds. "
|
||||
"Offset: "
|
||||
+ std::to_string(offset) + ", Max: " + std::to_string(size_t(internal_buffer.end() - internal_buffer.begin())),
|
||||
+ std::to_string(offset) + ", Max: " + std::to_string(static_cast<size_t>(internal_buffer.end() - internal_buffer.begin())),
|
||||
ErrorCodes::SEEK_POSITION_OUT_OF_BOUND);
|
||||
}
|
||||
else
|
||||
|
@ -184,7 +184,7 @@ off_t ReadBufferFromS3::seek(off_t offset_, int whence)
|
||||
if (!restricted_seek)
|
||||
{
|
||||
if (!working_buffer.empty()
|
||||
&& size_t(offset_) >= offset - working_buffer.size()
|
||||
&& static_cast<size_t>(offset_) >= offset - working_buffer.size()
|
||||
&& offset_ < offset)
|
||||
{
|
||||
pos = working_buffer.end() - (offset - offset_);
|
||||
|
@ -368,7 +368,7 @@ void WriteBufferFromS3::completeMultipartUpload()
|
||||
void WriteBufferFromS3::makeSinglepartUpload()
|
||||
{
|
||||
auto size = temporary_buffer->tellp();
|
||||
bool with_pool = bool(schedule);
|
||||
bool with_pool = static_cast<bool>(schedule);
|
||||
|
||||
LOG_TRACE(log, "Making single part upload. Bucket: {}, Key: {}, Size: {}, WithPool: {}", bucket, key, size, with_pool);
|
||||
|
||||
@ -456,7 +456,7 @@ void WriteBufferFromS3::fillPutRequest(Aws::S3::Model::PutObjectRequest & req)
|
||||
void WriteBufferFromS3::processPutRequest(PutObjectTask & task)
|
||||
{
|
||||
auto outcome = client_ptr->PutObject(task.req);
|
||||
bool with_pool = bool(schedule);
|
||||
bool with_pool = static_cast<bool>(schedule);
|
||||
|
||||
if (outcome.IsSuccess())
|
||||
LOG_TRACE(log, "Single part upload has completed. Bucket: {}, Key: {}, Object size: {}, WithPool: {}", bucket, key, task.req.GetContentLength(), with_pool);
|
||||
|
@ -59,13 +59,13 @@ Field zeroField(const Field & value)
|
||||
{
|
||||
switch (value.getType())
|
||||
{
|
||||
case Field::Types::UInt64: return UInt64(0);
|
||||
case Field::Types::Int64: return Int64(0);
|
||||
case Field::Types::Float64: return Float64(0);
|
||||
case Field::Types::UInt128: return UInt128(0);
|
||||
case Field::Types::Int128: return Int128(0);
|
||||
case Field::Types::UInt256: return UInt256(0);
|
||||
case Field::Types::Int256: return Int256(0);
|
||||
case Field::Types::UInt64: return static_cast<UInt64>(0);
|
||||
case Field::Types::Int64: return static_cast<Int64>(0);
|
||||
case Field::Types::Float64: return static_cast<Float64>(0);
|
||||
case Field::Types::UInt128: return static_cast<UInt128>(0);
|
||||
case Field::Types::Int128: return static_cast<Int128>(0);
|
||||
case Field::Types::UInt256: return static_cast<UInt256>(0);
|
||||
case Field::Types::Int256: return static_cast<Int256>(0);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -24,7 +24,7 @@ void ClientInfo::write(WriteBuffer & out, UInt64 server_protocol_revision) const
|
||||
if (server_protocol_revision < DBMS_MIN_REVISION_WITH_CLIENT_INFO)
|
||||
throw Exception("Logical error: method ClientInfo::write is called for unsupported server revision", ErrorCodes::LOGICAL_ERROR);
|
||||
|
||||
writeBinary(UInt8(query_kind), out);
|
||||
writeBinary(static_cast<UInt8>(query_kind), out);
|
||||
if (empty())
|
||||
return;
|
||||
|
||||
@ -35,7 +35,7 @@ void ClientInfo::write(WriteBuffer & out, UInt64 server_protocol_revision) const
|
||||
if (server_protocol_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_INITIAL_QUERY_START_TIME)
|
||||
writeBinary(initial_query_start_time_microseconds, out);
|
||||
|
||||
writeBinary(UInt8(interface), out);
|
||||
writeBinary(static_cast<UInt8>(interface), out);
|
||||
|
||||
if (interface == Interface::TCP)
|
||||
{
|
||||
@ -48,7 +48,7 @@ void ClientInfo::write(WriteBuffer & out, UInt64 server_protocol_revision) const
|
||||
}
|
||||
else if (interface == Interface::HTTP)
|
||||
{
|
||||
writeBinary(UInt8(http_method), out);
|
||||
writeBinary(static_cast<UInt8>(http_method), out);
|
||||
writeBinary(http_user_agent, out);
|
||||
|
||||
if (server_protocol_revision >= DBMS_MIN_REVISION_WITH_X_FORWARDED_FOR_IN_CLIENT_INFO)
|
||||
@ -86,7 +86,7 @@ void ClientInfo::write(WriteBuffer & out, UInt64 server_protocol_revision) const
|
||||
else
|
||||
{
|
||||
// Don't have OpenTelemetry header.
|
||||
writeBinary(uint8_t(0), out);
|
||||
writeBinary(static_cast<UInt8>(0), out);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -82,7 +82,7 @@ void collectCrashLog(Int32 signal, UInt64 thread_id, const String & query_id, co
|
||||
|
||||
stack_trace.toStringEveryLine([&trace_full](const std::string & line) { trace_full.push_back(line); });
|
||||
|
||||
CrashLogElement element{time_t(time / 1000000000), time, signal, thread_id, query_id, trace, trace_full};
|
||||
CrashLogElement element{static_cast<time_t>(time / 1000000000), time, signal, thread_id, query_id, trace, trace_full};
|
||||
crash_log_owned->add(element);
|
||||
}
|
||||
}
|
||||
|
@ -39,7 +39,7 @@ void DNSCacheUpdater::run()
|
||||
* - automatically throttle when DNS requests take longer time;
|
||||
* - add natural randomization on huge clusters - avoid sending all requests at the same moment of time from different servers.
|
||||
*/
|
||||
task_handle->scheduleAfter(size_t(update_period_seconds) * 1000);
|
||||
task_handle->scheduleAfter(static_cast<size_t>(update_period_seconds) * 1000);
|
||||
}
|
||||
|
||||
void DNSCacheUpdater::start()
|
||||
|
@@ -1398,15 +1398,17 @@ bool SelectQueryExpressionAnalyzer::appendLimitBy(ExpressionActionsChain & chain
if (!select_query->limitBy())
return false;

ExpressionActionsChain::Step & step = chain.lastStep(aggregated_columns);
/// Use columns for ORDER BY.
/// They could be required to do ORDER BY on the initiator in case of distributed queries.
ExpressionActionsChain::Step & step = chain.lastStep(chain.getLastStep().getRequiredColumns());

getRootActions(select_query->limitBy(), only_types, step.actions());

NameSet aggregated_names;
for (const auto & column : aggregated_columns)
NameSet existing_column_names;
for (const auto & column : chain.getLastStep().getRequiredColumns())
{
step.addRequiredOutput(column.name);
aggregated_names.insert(column.name);
existing_column_names.insert(column.name);
}

auto & children = select_query->limitBy()->children;
@@ -1416,7 +1418,7 @@ bool SelectQueryExpressionAnalyzer::appendLimitBy(ExpressionActionsChain & chain
replaceForPositionalArguments(child, select_query, ASTSelectQuery::Expression::LIMIT_BY);

auto child_name = child->getColumnName();
if (!aggregated_names.contains(child_name))
if (!existing_column_names.contains(child_name))
step.addRequiredOutput(child_name);
}

@@ -325,7 +325,6 @@ public:
bool hasConstAggregationKeys() const { return has_const_aggregation_keys; }
const AggregateDescriptions & aggregates() const { return aggregate_descriptions; }

const PreparedSets & getPreparedSets() const { return prepared_sets; }
std::unique_ptr<QueryPlan> getJoinedPlan();

/// Tables that will need to be sent to remote servers for distributed query processing.
@@ -48,7 +48,7 @@ BlockIO InterpreterCheckQuery::execute()
{
bool result = std::all_of(check_results.begin(), check_results.end(), [] (const CheckResult & res) { return res.success; });
auto column = ColumnUInt8::create();
column->insertValue(UInt64(result));
column->insertValue(static_cast<UInt64>(result));
block = Block{{std::move(column), std::make_shared<DataTypeUInt8>(), "result"}};
}
else
@@ -380,7 +380,7 @@ BlockIO InterpreterInsertQuery::execute()
pipeline.dropTotalsAndExtremes();

if (table->supportsParallelInsert() && settings.max_insert_threads > 1)
out_streams_size = std::min(size_t(settings.max_insert_threads), pipeline.getNumStreams());
out_streams_size = std::min(static_cast<size_t>(settings.max_insert_threads), pipeline.getNumStreams());

pipeline.resize(out_streams_size);
@@ -160,17 +160,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
const SelectQueryOptions & options_,
const Names & required_result_column_names_)
: InterpreterSelectQuery(query_ptr_, context_, std::nullopt, nullptr, options_, required_result_column_names_)
{
}

InterpreterSelectQuery::InterpreterSelectQuery(
const ASTPtr & query_ptr_,
ContextPtr context_,
const SelectQueryOptions & options_,
PreparedSets prepared_sets_)
: InterpreterSelectQuery(query_ptr_, context_, std::nullopt, nullptr, options_, {}, {}, std::move(prepared_sets_))
{
}
{}

InterpreterSelectQuery::InterpreterSelectQuery(
const ASTPtr & query_ptr_,
ContextPtr context_,
@@ -189,6 +179,16 @@ InterpreterSelectQuery::InterpreterSelectQuery(
: InterpreterSelectQuery(query_ptr_, context_, std::nullopt, storage_, options_.copy().noSubquery(), {}, metadata_snapshot_)
{}

InterpreterSelectQuery::InterpreterSelectQuery(
const ASTPtr & query_ptr_,
ContextPtr context_,
const SelectQueryOptions & options_,
SubqueriesForSets subquery_for_sets_,
PreparedSets prepared_sets_)
: InterpreterSelectQuery(
query_ptr_, context_, std::nullopt, nullptr, options_, {}, {}, std::move(subquery_for_sets_), std::move(prepared_sets_))
{}

InterpreterSelectQuery::~InterpreterSelectQuery() = default;

|
||||
const SelectQueryOptions & options_,
|
||||
const Names & required_result_column_names,
|
||||
const StorageMetadataPtr & metadata_snapshot_,
|
||||
SubqueriesForSets subquery_for_sets_,
|
||||
PreparedSets prepared_sets_)
|
||||
/// NOTE: the query almost always should be cloned because it will be modified during analysis.
|
||||
: IInterpreterUnionOrSelectQuery(options_.modify_inplace ? query_ptr_ : query_ptr_->clone(), context_, options_)
|
||||
@ -282,6 +283,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
|
||||
, input_pipe(std::move(input_pipe_))
|
||||
, log(&Poco::Logger::get("InterpreterSelectQuery"))
|
||||
, metadata_snapshot(metadata_snapshot_)
|
||||
, subquery_for_sets(std::move(subquery_for_sets_))
|
||||
, prepared_sets(std::move(prepared_sets_))
|
||||
{
|
||||
checkStackSize();
|
||||
@ -404,9 +406,6 @@ InterpreterSelectQuery::InterpreterSelectQuery(
|
||||
if (storage)
|
||||
view = dynamic_cast<StorageView *>(storage.get());
|
||||
|
||||
/// Reuse already built sets for multiple passes of analysis
|
||||
SubqueriesForSets subquery_for_sets;
|
||||
|
||||
auto analyze = [&] (bool try_move_to_prewhere)
|
||||
{
|
||||
/// Allow push down and other optimizations for VIEW: replace with subquery and rewrite it.
|
||||
@ -570,7 +569,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
|
||||
|
||||
/// Reuse already built sets for multiple passes of analysis
|
||||
subquery_for_sets = std::move(query_analyzer->getSubqueriesForSets());
|
||||
prepared_sets = query_info.sets.empty() ? query_analyzer->getPreparedSets() : query_info.sets;
|
||||
prepared_sets = std::move(query_analyzer->getPreparedSets());
|
||||
|
||||
/// Do not try move conditions to PREWHERE for the second time.
|
||||
/// Otherwise, we won't be able to fallback from inefficient PREWHERE to WHERE later.
|
||||
@ -654,9 +653,14 @@ Block InterpreterSelectQuery::getSampleBlockImpl()
|
||||
auto & query = getSelectQuery();
|
||||
query_analyzer->makeSetsForIndex(query.where());
|
||||
query_analyzer->makeSetsForIndex(query.prewhere());
|
||||
query_info.sets = query_analyzer->getPreparedSets();
|
||||
query_info.sets = std::move(query_analyzer->getPreparedSets());
|
||||
query_info.subquery_for_sets = std::move(query_analyzer->getSubqueriesForSets());
|
||||
|
||||
from_stage = storage->getQueryProcessingStage(context, options.to_stage, storage_snapshot, query_info);
|
||||
|
||||
/// query_info.sets is used for further set index analysis. Use copy instead of move.
|
||||
query_analyzer->getPreparedSets() = query_info.sets;
|
||||
query_analyzer->getSubqueriesForSets() = std::move(query_info.subquery_for_sets);
|
||||
}
|
||||
|
||||
/// Do I need to perform the first part of the pipeline?
|
||||
|
@@ -67,11 +67,13 @@ public:
const StorageMetadataPtr & metadata_snapshot_ = nullptr,
const SelectQueryOptions & = {});

/// Read data not from the table specified in the query, but from the specified `storage_`.
/// Reuse existing subqueries_for_sets and prepared_sets for another pass of analysis. It's used for projection.
/// TODO: Find a general way of sharing sets among different interpreters, such as subqueries.
InterpreterSelectQuery(
const ASTPtr & query_ptr_,
ContextPtr context_,
const SelectQueryOptions &,
SubqueriesForSets subquery_for_sets_,
PreparedSets prepared_sets_);

~InterpreterSelectQuery() override;
@@ -115,6 +117,7 @@ private:
const SelectQueryOptions &,
const Names & required_result_column_names = {},
const StorageMetadataPtr & metadata_snapshot_ = nullptr,
SubqueriesForSets subquery_for_sets_ = {},
PreparedSets prepared_sets_ = {});

ASTSelectQuery & getSelectQuery() { return query_ptr->as<ASTSelectQuery &>(); }
@@ -207,6 +210,7 @@ private:
StorageSnapshotPtr storage_snapshot;

/// Reuse already built sets for multiple passes of analysis, possibly across interpreters.
SubqueriesForSets subquery_for_sets;
PreparedSets prepared_sets;
};
@@ -104,7 +104,7 @@ InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(
}
else if (settings.offset)
{
ASTPtr new_limit_offset_ast = std::make_shared<ASTLiteral>(Field(UInt64(settings.offset)));
ASTPtr new_limit_offset_ast = std::make_shared<ASTLiteral>(Field(static_cast<UInt64>(settings.offset)));
select_query->setExpression(ASTSelectQuery::Expression::LIMIT_OFFSET, std::move(new_limit_offset_ast));
}

@@ -115,15 +115,15 @@ InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(

UInt64 new_limit_length = 0;
if (settings.offset == 0)
new_limit_length = std::min(limit_length, UInt64(settings.limit));
new_limit_length = std::min(limit_length, static_cast<UInt64>(settings.limit));
else if (settings.offset < limit_length)
new_limit_length = settings.limit ? std::min(UInt64(settings.limit), limit_length - settings.offset) : (limit_length - settings.offset);
new_limit_length = settings.limit ? std::min(static_cast<UInt64>(settings.limit), limit_length - settings.offset) : (limit_length - settings.offset);

limit_length_ast->as<ASTLiteral &>().value = Field(new_limit_length);
}
else if (settings.limit)
{
ASTPtr new_limit_length_ast = std::make_shared<ASTLiteral>(Field(UInt64(settings.limit)));
ASTPtr new_limit_length_ast = std::make_shared<ASTLiteral>(Field(static_cast<UInt64>(settings.limit)));
select_query->setExpression(ASTSelectQuery::Expression::LIMIT_LENGTH, std::move(new_limit_length_ast));
}
@@ -588,7 +588,7 @@ void InterpreterSystemQuery::restartReplicas(ContextMutablePtr system_context)
for (auto & guard : guards)
guard.second = catalog.getDDLGuard(guard.first.database_name, guard.first.table_name);

ThreadPool pool(std::min(size_t(getNumberOfPhysicalCPUCores()), replica_names.size()));
ThreadPool pool(std::min(static_cast<size_t>(getNumberOfPhysicalCPUCores()), replica_names.size()));

for (auto & replica : replica_names)
{
@@ -325,7 +325,7 @@ static ASTPtr getPartitionPolicy(const NamesAndTypesList & primary_keys)
return std::make_shared<ASTIdentifier>(column_name);

return makeASTFunction("intDiv", std::make_shared<ASTIdentifier>(column_name),
std::make_shared<ASTLiteral>(UInt64(type_max_size / 1000)));
std::make_shared<ASTLiteral>(static_cast<UInt64>(type_max_size / 1000)));
};

ASTPtr best_partition;
@@ -493,7 +493,7 @@ ASTs InterpreterCreateImpl::getRewrittenQueries(
String sign_column_name = getUniqueColumnName(columns_name_and_type, "_sign");
String version_column_name = getUniqueColumnName(columns_name_and_type, "_version");
columns->set(columns->columns, InterpreterCreateQuery::formatColumns(columns_description));
columns->columns->children.emplace_back(create_materialized_column_declaration(sign_column_name, "Int8", UInt64(1)));
columns->columns->children.emplace_back(create_materialized_column_declaration(sign_column_name, "Int8", static_cast<UInt64>(1)));
columns->columns->children.emplace_back(create_materialized_column_declaration(version_column_name, "UInt64", UInt64(1)));

/// Add minmax skipping index for _version column.
@@ -71,7 +71,7 @@ static void dumpProfileEvents(ProfileEventsSnapshot const & snapshot, DB::Mutabl
{
size_t i = 0;
columns[i++]->insertData(host_name.data(), host_name.size());
columns[i++]->insert(UInt64(snapshot.current_time));
columns[i++]->insert(static_cast<UInt64>(snapshot.current_time));
columns[i++]->insert(UInt64{snapshot.thread_id});
columns[i++]->insert(Type::INCREMENT);
}
@@ -81,8 +81,8 @@ static void dumpMemoryTracker(ProfileEventsSnapshot const & snapshot, DB::Mutabl
{
size_t i = 0;
columns[i++]->insertData(host_name.data(), host_name.size());
columns[i++]->insert(UInt64(snapshot.current_time));
columns[i++]->insert(UInt64{snapshot.thread_id});
columns[i++]->insert(static_cast<UInt64>(snapshot.current_time));
columns[i++]->insert(static_cast<UInt64>(snapshot.thread_id));
columns[i++]->insert(Type::GAUGE);

columns[i++]->insertData(MemoryTracker::USAGE_EVENT_NAME, strlen(MemoryTracker::USAGE_EVENT_NAME));
@@ -279,7 +279,7 @@ void QueryLogElement::appendClientInfo(const ClientInfo & client_info, MutableCo
columns[i++]->insert(client_info.initial_query_start_time);
columns[i++]->insert(client_info.initial_query_start_time_microseconds);

columns[i++]->insert(UInt64(client_info.interface));
columns[i++]->insert(static_cast<UInt64>(client_info.interface));
columns[i++]->insert(static_cast<UInt64>(client_info.is_secure));

columns[i++]->insert(client_info.os_user);
@@ -290,7 +290,7 @@ void QueryLogElement::appendClientInfo(const ClientInfo & client_info, MutableCo
columns[i++]->insert(client_info.client_version_minor);
columns[i++]->insert(client_info.client_version_patch);

columns[i++]->insert(UInt64(client_info.http_method));
columns[i++]->insert(static_cast<UInt64>(client_info.http_method));
columns[i++]->insert(client_info.http_user_agent);
columns[i++]->insert(client_info.http_referer);
columns[i++]->insert(client_info.forwarded_for);
@@ -105,7 +105,9 @@ NamesAndTypesList SessionLogElement::getNamesAndTypes()
{"HTTP", static_cast<Int8>(Interface::HTTP)},
{"gRPC", static_cast<Int8>(Interface::GRPC)},
{"MySQL", static_cast<Int8>(Interface::MYSQL)},
{"PostgreSQL", static_cast<Int8>(Interface::POSTGRESQL)}
{"PostgreSQL", static_cast<Int8>(Interface::POSTGRESQL)},
{"LOCAL", static_cast<Int8>(Interface::LOCAL)},
{"TCP_INTERSERVER", static_cast<Int8>(Interface::TCP_INTERSERVER)}
});

auto lc_string_datatype = std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>());
@@ -49,7 +49,7 @@ void ThreadStatus::applyQuerySettings()
initQueryProfiler();

untracked_memory_limit = settings.max_untracked_memory;
if (settings.memory_profiler_step && settings.memory_profiler_step < UInt64(untracked_memory_limit))
if (settings.memory_profiler_step && settings.memory_profiler_step < static_cast<UInt64>(untracked_memory_limit))
untracked_memory_limit = settings.memory_profiler_step;

#if defined(OS_LINUX)
@@ -84,7 +84,7 @@ void TraceCollector::run()
{
uintptr_t addr = 0;
readPODBinary(addr, in);
trace.emplace_back(UInt64(addr));
trace.emplace_back(static_cast<UInt64>(addr));
}

TraceType trace_type;
@@ -103,8 +103,8 @@ void TraceCollector::run()
struct timespec ts;
clock_gettime(CLOCK_REALTIME, &ts);

UInt64 time = UInt64(ts.tv_sec * 1000000000LL + ts.tv_nsec);
UInt64 time_in_microseconds = UInt64((ts.tv_sec * 1000000LL) + (ts.tv_nsec / 1000));
UInt64 time = static_cast<UInt64>(ts.tv_sec * 1000000000LL + ts.tv_nsec);
UInt64 time_in_microseconds = static_cast<UInt64>((ts.tv_sec * 1000000LL) + (ts.tv_nsec / 1000));
TraceLogElement element{time_t(time / 1000000000), time_in_microseconds, time, trace_type, thread_id, query_id, trace, size};
trace_log->add(element);
}
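Editorial note: a standalone sketch of the timestamp arithmetic in the TraceCollector hunk above — one CLOCK_REALTIME reading turned into nanosecond and microsecond values. Only the cast style changes in the diff, not the arithmetic:

#include <cstdint>
#include <cstdio>
#include <ctime>

int main()
{
    struct timespec ts;
    clock_gettime(CLOCK_REALTIME, &ts);

    // Same expressions as above, with static_cast instead of UInt64(...).
    uint64_t time_ns = static_cast<uint64_t>(ts.tv_sec * 1000000000LL + ts.tv_nsec);
    uint64_t time_us = static_cast<uint64_t>((ts.tv_sec * 1000000LL) + (ts.tv_nsec / 1000));

    std::printf("ns: %llu, us: %llu, s: %lld\n",
                static_cast<unsigned long long>(time_ns),
                static_cast<unsigned long long>(time_us),
                static_cast<long long>(time_ns / 1000000000));
}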
@@ -76,7 +76,7 @@ void appendUnusedGroupByColumn(ASTSelectQuery * select_query)
/// Also start unused_column integer must not intersect with ([1, source_columns.size()])
/// might be in positional GROUP BY.
select_query->setExpression(ASTSelectQuery::Expression::GROUP_BY, std::make_shared<ASTExpressionList>());
select_query->groupBy()->children.emplace_back(std::make_shared<ASTLiteral>(Int64(-1)));
select_query->groupBy()->children.emplace_back(std::make_shared<ASTLiteral>(static_cast<Int64>(-1)));
}

/// Eliminates injective function calls and constant expressions from group by statement.
@@ -294,7 +294,7 @@ struct ExistsExpressionData
select_query->setExpression(ASTSelectQuery::Expression::SELECT, select_expr_list);
select_query->setExpression(ASTSelectQuery::Expression::TABLES, tables_in_select);

ASTPtr limit_length_ast = std::make_shared<ASTLiteral>(Field(UInt64(1)));
ASTPtr limit_length_ast = std::make_shared<ASTLiteral>(Field(static_cast<UInt64>(1)));
select_query->setExpression(ASTSelectQuery::Expression::LIMIT_LENGTH, std::move(limit_length_ast));

auto select_with_union_query = std::make_shared<ASTSelectWithUnionQuery>();
@@ -347,7 +347,7 @@ void replaceWithSumCount(String column_name, ASTFunction & func)
/// Rewrite "avg" to sumCount().1 / sumCount().2
auto new_arg1 = makeASTFunction("tupleElement", func_base, std::make_shared<ASTLiteral>(UInt8(1)));
auto new_arg2 = makeASTFunction("CAST",
makeASTFunction("tupleElement", func_base, std::make_shared<ASTLiteral>(UInt8(2))),
makeASTFunction("tupleElement", func_base, std::make_shared<ASTLiteral>(static_cast<UInt8>(2))),
std::make_shared<ASTLiteral>("Float64"));

func.name = "divide";
@@ -892,7 +892,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
ReadableSize(elem.read_bytes / elapsed_seconds));
}

if (log_queries && elem.type >= log_queries_min_type && Int64(elem.query_duration_ms) >= log_queries_min_query_duration_ms)
if (log_queries && elem.type >= log_queries_min_type && static_cast<Int64>(elem.query_duration_ms) >= log_queries_min_query_duration_ms)
{
if (auto query_log = context->getQueryLog())
query_log->add(elem);
@@ -1009,7 +1009,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
logException(context, elem);

/// In case of exception we log internal queries also
if (log_queries && elem.type >= log_queries_min_type && Int64(elem.query_duration_ms) >= log_queries_min_query_duration_ms)
if (log_queries && elem.type >= log_queries_min_type && static_cast<Int64>(elem.query_duration_ms) >= log_queries_min_query_duration_ms)
{
if (auto query_log = context->getQueryLog())
query_log->add(elem);
@@ -36,6 +36,7 @@ namespace
String auth_type_name = AuthenticationTypeInfo::get(auth_type).name;
String value_prefix;
std::optional<String> value;
std::optional<String> salt;
const boost::container::flat_set<String> * values = nullptr;

if (show_password ||
@@ -56,6 +57,10 @@ namespace
auth_type_name = "sha256_hash";
value_prefix = "BY";
value = auth_data.getPasswordHashHex();
if (!auth_data.getSalt().empty())
{
salt = auth_data.getSalt();
}
break;
}
case AuthenticationType::DOUBLE_SHA1_PASSWORD:
@@ -107,6 +112,8 @@ namespace
if (value)
{
settings.ostr << " " << quoteString(*value);
if (salt)
settings.ostr << " SALT " << quoteString(*salt);
}
else if (values)
{
@@ -16,7 +16,12 @@
#include <base/range.h>
#include <boost/algorithm/string/predicate.hpp>
#include <base/insertAtEnd.h>

#include <Common/config.h>
#include <Common/hex.h>
#if USE_SSL
# include <openssl/crypto.h>
# include <openssl/rand.h>
#endif

namespace DB
{
@@ -34,7 +39,7 @@ namespace
}

bool parseAuthenticationData(IParserBase::Pos & pos, Expected & expected, AuthenticationData & auth_data)
bool parseAuthenticationData(IParserBase::Pos & pos, Expected & expected, bool id_mode, AuthenticationData & auth_data)
{
return IParserBase::wrapParseImpl(pos, [&]
{
@@ -99,14 +104,22 @@ namespace
}

String value;
String parsed_salt;
boost::container::flat_set<String> common_names;
if (expect_password || expect_hash)
{
ASTPtr ast;
if (!ParserKeyword{"BY"}.ignore(pos, expected) || !ParserStringLiteral{}.parse(pos, ast, expected))
return false;

value = ast->as<const ASTLiteral &>().value.safeGet<String>();

if (id_mode && expect_hash)
{
if (ParserKeyword{"SALT"}.ignore(pos, expected) && ParserStringLiteral{}.parse(pos, ast, expected))
{
parsed_salt = ast->as<const ASTLiteral &>().value.safeGet<String>();
}
}
}
else if (expect_ldap_server_name)
{
@@ -141,6 +154,34 @@ namespace
}

auth_data = AuthenticationData{*type};
if (auth_data.getType() == AuthenticationType::SHA256_PASSWORD)
{
if (!parsed_salt.empty())
{
auth_data.setSalt(parsed_salt);
}
else if (expect_password)
{
#if USE_SSL
///generate and add salt here
///random generator FIPS complaint
uint8_t key[32];
RAND_bytes(key, sizeof(key));
String salt;
salt.resize(sizeof(key) * 2);
char * buf_pos = salt.data();
for (uint8_t k : key)
{
writeHexByteUppercase(k, buf_pos);
buf_pos += 2;
}
value.append(salt);
auth_data.setSalt(salt);
#else
///if USE_SSL is not defined, Exception thrown later
#endif
}
}
if (expect_password)
auth_data.setPassword(value);
else if (expect_hash)
@@ -393,7 +434,7 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
if (!auth_data)
{
AuthenticationData new_auth_data;
if (parseAuthenticationData(pos, expected, new_auth_data))
if (parseAuthenticationData(pos, expected, attach_mode, new_auth_data))
{
auth_data = std::move(new_auth_data);
continue;
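Editorial note: a standalone sketch of the salt handling introduced above, assuming OpenSSL is available. It mirrors the parser code: 32 random bytes from RAND_bytes, uppercase-hex encoded (writeHexByteUppercase is ClickHouse-internal, so a plain hex table is used here), and appended to the plain-text password before the password hash is computed and stored:

#include <openssl/rand.h>
#include <cstdint>
#include <cstdio>
#include <string>

int main()
{
    uint8_t key[32];
    if (RAND_bytes(key, sizeof(key)) != 1) // CSPRNG provided by OpenSSL
        return 1;

    static const char hex_digits[] = "0123456789ABCDEF";
    std::string salt;
    salt.reserve(sizeof(key) * 2);
    for (uint8_t byte : key)
    {
        salt.push_back(hex_digits[byte >> 4]);   // same uppercase hex layout as the diff
        salt.push_back(hex_digits[byte & 0x0F]);
    }

    std::string password = "qwe123";       // example password from the tests below
    std::string to_hash = password + salt; // what value.append(salt) produces before setPassword()

    std::printf("SALT %s\n", salt.c_str());
    std::printf("material passed to setPassword(): %s\n", to_hash.c_str());
}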
@@ -298,28 +298,69 @@ namespace
{
bool parseCastAs(IParser::Pos & pos, ASTPtr & node, Expected & expected)
{
/// expr AS type
/** Possible variants for cast operator cast(expr [[AS] alias_1] AS Type), cast(expr [[AS] alias_1], type_expr [[as] alias_2]).
* First try to match with cast(expr [[AS] alias_1] AS Type)
* Then try to match with cast(expr [[AS] alias_1], type_expr [[as] alias_2]).
*/

ASTPtr expr_node;
ASTPtr type_node;
ASTPtr identifier_node;

if (ParserExpression().parse(pos, expr_node, expected))
{
if (ParserKeyword("AS").ignore(pos, expected))
ParserKeyword as_keyword_parser("AS");
bool parse_as = as_keyword_parser.ignore(pos, expected);

/// CAST (a b AS UInt32) OR CAST (a b, expr)

if (!parse_as && ParserIdentifier().parse(pos, identifier_node, expected))
{
if (ParserDataType().parse(pos, type_node, expected))
expr_node->setAlias(getIdentifierName(identifier_node));
parse_as = as_keyword_parser.ignore(pos, expected);
}

if (parse_as)
{
/// CAST (a AS Type) OR CAST (a AS b AS Type) OR CAST (a AS b, expr)

auto begin = pos;
auto expected_copy = expected;
bool next_identifier = ParserIdentifier().ignore(begin, expected_copy);
bool next_identifier_with_comma = next_identifier && ParserToken(TokenType::Comma).ignore(begin, expected_copy);
bool next_identifier_with_as
= next_identifier && !next_identifier_with_comma && as_keyword_parser.ignore(begin, expected_copy);

if (next_identifier_with_as)
{
if (ParserIdentifier().parse(pos, identifier_node, expected) && as_keyword_parser.ignore(pos, expected))
expr_node->setAlias(getIdentifierName(identifier_node));
else
return false;
}

if (!next_identifier_with_comma && ParserDataType().parse(pos, type_node, expected))
{
node = createFunctionCast(expr_node, type_node);
return true;
}
}
else if (ParserToken(TokenType::Comma).ignore(pos, expected))

/// CAST(a AS b, expr)

if (parse_as)
{
if (ParserExpression().parse(pos, type_node, expected))
{
node = makeASTFunction("CAST", expr_node, type_node);
return true;
}
if (ParserIdentifier().parse(pos, identifier_node, expected))
expr_node->setAlias(getIdentifierName(identifier_node));
else
return false;
}

if (ParserToken(TokenType::Comma).ignore(pos, expected)
&& ParserExpressionWithOptionalAlias(true /*allow_alias_without_as_keyword*/).parse(pos, type_node, expected))
{
node = makeASTFunction("CAST", expr_node, type_node);
return true;
}
}
@@ -331,6 +372,9 @@ namespace
/// Either SUBSTRING(expr FROM start) or SUBSTRING(expr FROM start FOR length) or SUBSTRING(expr, start, length)
/// The latter will be parsed normally as a function later.

ParserKeyword as_keyword_parser("AS");
ParserIdentifier identifier_parser;

ASTPtr expr_node;
ASTPtr start_node;
ASTPtr length_node;
@@ -338,35 +382,65 @@ namespace
if (!ParserExpression().parse(pos, expr_node, expected))
return false;

if (pos->type != TokenType::Comma)
auto from_keyword_parser = ParserKeyword("FROM");
bool from_exists = from_keyword_parser.check(pos, expected);

if (!from_exists && pos->type != TokenType::Comma)
{
if (!ParserKeyword("FROM").ignore(pos, expected))
ASTPtr identifier_node;
bool parsed_as = as_keyword_parser.ignore(pos, expected);
bool parsed_identifer = identifier_parser.parse(pos, identifier_node, expected);

if (parsed_as && !parsed_identifer)
return false;

if (parsed_identifer)
expr_node->setAlias(getIdentifierName(identifier_node));

from_exists = from_keyword_parser.check(pos, expected);
}
else

if (pos->type == TokenType::Comma)
{
if (from_exists)
return false;

++pos;
}

if (!ParserExpression().parse(pos, start_node, expected))
return false;

if (pos->type != TokenType::ClosingRoundBracket)
auto for_keyword_parser = ParserKeyword("FOR");
bool for_exists = for_keyword_parser.check(pos, expected);
if (!for_exists && pos->type != TokenType::Comma)
{
if (pos->type != TokenType::Comma)
{
if (!ParserKeyword("FOR").ignore(pos, expected))
return false;
}
else
{
++pos;
}

if (!ParserExpression().parse(pos, length_node, expected))
ASTPtr identifier_node;
bool parsed_as = as_keyword_parser.ignore(pos, expected);
bool parsed_identifer = identifier_parser.parse(pos, identifier_node, expected);
if (parsed_as && !parsed_identifer)
return false;

if (parsed_identifer)
start_node->setAlias(getIdentifierName(identifier_node));

for_exists = for_keyword_parser.check(pos, expected);
}

bool need_parse_length_expression = for_exists;
if (pos->type == TokenType::Comma)
{
if (for_exists)
return false;

++pos;
need_parse_length_expression = true;
}

if (need_parse_length_expression
&& !ParserExpressionWithOptionalAlias(true /*allow_alias_without_as_keyword*/).parse(pos, length_node, expected))
return false;

/// Convert to canonical representation in functional form: SUBSTRING(expr, start, length)
if (length_node)
node = makeASTFunction("substring", expr_node, start_node, length_node);
@@ -378,7 +452,7 @@ namespace

bool parseTrim(bool trim_left, bool trim_right, IParser::Pos & pos, ASTPtr & node, Expected & expected)
{
/// Handles all possible TRIM/LTRIM/RTRIM call variants
/// Handles all possible TRIM/LTRIM/RTRIM call variants ([[LEADING|TRAILING|BOTH] trim_character FROM] input_string)

std::string func_name;
bool char_override = false;
@@ -414,7 +488,26 @@ namespace
{
if (!ParserExpression().parse(pos, to_remove, expected))
return false;
if (!ParserKeyword("FROM").ignore(pos, expected))

auto from_keyword_parser = ParserKeyword("FROM");
bool from_exists = from_keyword_parser.check(pos, expected);

if (!from_exists)
{
ASTPtr identifier_node;
bool parsed_as = ParserKeyword("AS").ignore(pos, expected);
bool parsed_identifer = ParserIdentifier().parse(pos, identifier_node, expected);

if (parsed_as && !parsed_identifer)
return false;

if (parsed_identifer)
to_remove->setAlias(getIdentifierName(identifier_node));

from_exists = from_keyword_parser.check(pos, expected);
}

if (!from_exists)
return false;

auto quote_meta_func_node = std::make_shared<ASTFunction>();
@@ -429,7 +522,7 @@ namespace
}
}

if (!ParserExpression().parse(pos, expr_node, expected))
if (!ParserExpressionWithOptionalAlias(true /*allow_alias_without_as_keyword*/).parse(pos, expr_node, expected))
return false;

/// Convert to regexp replace function call
@@ -440,33 +533,24 @@ namespace
auto pattern_list_args = std::make_shared<ASTExpressionList>();
if (trim_left && trim_right)
{
pattern_list_args->children = {
std::make_shared<ASTLiteral>("^["),
to_remove,
std::make_shared<ASTLiteral>("]+|["),
to_remove,
std::make_shared<ASTLiteral>("]+$")
};
pattern_list_args->children
= {std::make_shared<ASTLiteral>("^["),
to_remove,
std::make_shared<ASTLiteral>("]+|["),
to_remove,
std::make_shared<ASTLiteral>("]+$")};
func_name = "replaceRegexpAll";
}
else
{
if (trim_left)
{
pattern_list_args->children = {
std::make_shared<ASTLiteral>("^["),
to_remove,
std::make_shared<ASTLiteral>("]+")
};
pattern_list_args->children = {std::make_shared<ASTLiteral>("^["), to_remove, std::make_shared<ASTLiteral>("]+")};
}
else
{
/// trim_right == false not possible
pattern_list_args->children = {
std::make_shared<ASTLiteral>("["),
to_remove,
std::make_shared<ASTLiteral>("]+$")
};
pattern_list_args->children = {std::make_shared<ASTLiteral>("["), to_remove, std::make_shared<ASTLiteral>("]+$")};
}
func_name = "replaceRegexpOne";
}
@@ -506,6 +590,9 @@ namespace

bool parseExtract(IParser::Pos & pos, ASTPtr & node, Expected & expected)
{
/// First try to match with date extract operator EXTRACT(part FROM date)
/// Then with function extract(haystack, pattern)

IParser::Pos begin = pos;
IntervalKind interval_kind;

@@ -514,7 +601,7 @@ namespace
ASTPtr expr;

ParserKeyword s_from("FROM");
ParserExpression elem_parser;
ParserExpressionWithOptionalAlias elem_parser(true /*allow_alias_without_as_keyword*/);

if (s_from.ignore(pos, expected) && elem_parser.parse(pos, expr, expected))
{
@@ -526,7 +613,7 @@ namespace
pos = begin;

ASTPtr expr_list;
if (!ParserExpressionList(false, false).parse(pos, expr_list, expected))
if (!ParserExpressionList(true /*allow_alias_without_as_keyword*/).parse(pos, expr_list, expected))
return false;

auto res = std::make_shared<ASTFunction>();
@@ -539,28 +626,57 @@ namespace

bool parsePosition(IParser::Pos & pos, ASTPtr & node, Expected & expected)
{
ASTPtr expr_list_node;
if (!ParserExpressionList(false, false).parse(pos, expr_list_node, expected))
return false;
/// First try to match with position(needle IN haystack)
/// Then with position(haystack, needle[, start_pos])

ASTExpressionList * expr_list = typeid_cast<ASTExpressionList *>(expr_list_node.get());
if (expr_list && expr_list->children.size() == 1)
ParserExpressionWithOptionalAlias expr_parser(true /*allow_alias_without_as_keyword*/);

ASTPtr first_arg_expr_node;
if (!expr_parser.parse(pos, first_arg_expr_node, expected))
{
ASTFunction * func_in = typeid_cast<ASTFunction *>(expr_list->children[0].get());
if (func_in && func_in->name == "in")
return false;
}

ASTFunction * func_in = typeid_cast<ASTFunction *>(first_arg_expr_node.get());
if (func_in && func_in->name == "in")
{
ASTExpressionList * in_args = typeid_cast<ASTExpressionList *>(func_in->arguments.get());
if (in_args && in_args->children.size() == 2)
{
ASTExpressionList * in_args = typeid_cast<ASTExpressionList *>(func_in->arguments.get());
if (in_args && in_args->children.size() == 2)
{
node = makeASTFunction("position", in_args->children[1], in_args->children[0]);
return true;
}
node = makeASTFunction("position", in_args->children[1], in_args->children[0]);
return true;
}
}

if (pos->type != TokenType::Comma)
return false;
++pos;

ASTPtr second_arg_expr_node;
if (!expr_parser.parse(pos, second_arg_expr_node, expected))
{
return false;
}

ASTPtr start_pos_expr_node;
if (pos->type == TokenType::Comma)
{
++pos;

if (!expr_parser.parse(pos, start_pos_expr_node, expected))
return false;
}

auto arguments = std::make_shared<ASTExpressionList>();
arguments->children.push_back(std::move(first_arg_expr_node));
arguments->children.push_back(std::move(second_arg_expr_node));

if (start_pos_expr_node)
arguments->children.push_back(std::move(start_pos_expr_node));

auto res = std::make_shared<ASTFunction>();
res->name = "position";
res->arguments = expr_list_node;
res->arguments = std::move(arguments);
res->children.push_back(res->arguments);
node = std::move(res);
return true;
@@ -568,6 +684,9 @@ namespace

bool parseDateAdd(const char * function_name, IParser::Pos & pos, ASTPtr & node, Expected & expected)
{
/// First to match with function(unit, offset, timestamp)
/// Then with function(offset, timestamp)

ASTPtr timestamp_node;
ASTPtr offset_node;

@@ -575,19 +694,18 @@ namespace
ASTPtr interval_func_node;
if (parseIntervalKind(pos, expected, interval_kind))
{
/// function(unit, offset, timestamp)
if (pos->type != TokenType::Comma)
return false;
++pos;

if (!ParserExpression().parse(pos, offset_node, expected))
if (!ParserExpressionWithOptionalAlias(true /*allow_alias_without_as_keyword*/).parse(pos, offset_node, expected))
return false;

if (pos->type != TokenType::Comma)
return false;
++pos;

if (!ParserExpression().parse(pos, timestamp_node, expected))
if (!ParserExpressionWithOptionalAlias(true /*allow_alias_without_as_keyword*/).parse(pos, timestamp_node, expected))
return false;
auto interval_expr_list_args = std::make_shared<ASTExpressionList>();
interval_expr_list_args->children = {offset_node};
@@ -600,7 +718,7 @@ namespace
else
{
ASTPtr expr_list;
if (!ParserExpressionList(false, false).parse(pos, expr_list, expected))
if (!ParserExpressionList(true /*allow_alias_without_as_keyword*/).parse(pos, expr_list, expected))
return false;

auto res = std::make_shared<ASTFunction>();
@@ -617,39 +735,59 @@ namespace

bool parseDateDiff(IParser::Pos & pos, ASTPtr & node, Expected & expected)
{
/// First to match with dateDiff(unit, startdate, enddate, [timezone])
/// Then with dateDiff('unit', startdate, enddate, [timezone])

ASTPtr left_node;
ASTPtr right_node;

IntervalKind interval_kind;
if (!parseIntervalKind(pos, expected, interval_kind))
if (parseIntervalKind(pos, expected, interval_kind))
{
ASTPtr expr_list;
if (!ParserExpressionList(false, false).parse(pos, expr_list, expected))
if (pos->type != TokenType::Comma)
return false;
++pos;

if (!ParserExpressionWithOptionalAlias(true /*allow_alias_without_as_keyword*/).parse(pos, left_node, expected))
return false;

auto res = std::make_shared<ASTFunction>();
res->name = "dateDiff";
res->arguments = expr_list;
res->children.push_back(res->arguments);
node = std::move(res);
if (pos->type != TokenType::Comma)
return false;
++pos;

if (!ParserExpressionWithOptionalAlias(true /*allow_alias_without_as_keyword*/).parse(pos, right_node, expected))
return false;

ASTPtr timezone_node;

if (pos->type == TokenType::Comma)
{
/// Optional timezone
++pos;

if (!ParserExpressionWithOptionalAlias(true /*allow_alias_without_as_keyword*/).parse(pos, timezone_node, expected))
return false;
}

auto interval_literal = std::make_shared<ASTLiteral>(interval_kind.toDateDiffUnit());
if (timezone_node)
node = makeASTFunction("dateDiff", std::move(interval_literal), std::move(left_node), std::move(right_node), std::move(timezone_node));
else
node = makeASTFunction("dateDiff", std::move(interval_literal), std::move(left_node), std::move(right_node));

return true;
}

if (pos->type != TokenType::Comma)
return false;
++pos;

if (!ParserExpression().parse(pos, left_node, expected))
ASTPtr expr_list;
if (!ParserExpressionList(true /*allow_alias_without_as_keyword*/).parse(pos, expr_list, expected))
return false;

if (pos->type != TokenType::Comma)
return false;
++pos;
auto res = std::make_shared<ASTFunction>();
res->name = "dateDiff";
res->arguments = expr_list;
res->children.push_back(res->arguments);
node = std::move(res);

if (!ParserExpression().parse(pos, right_node, expected))
return false;

node = makeASTFunction("dateDiff", std::make_shared<ASTLiteral>(interval_kind.toDateDiffUnit()), left_node, right_node);
return true;
}
@@ -85,13 +85,13 @@ ASTPtr ASTDeclareOptions::clone() const

bool ParserAlwaysTrue::parseImpl(IParser::Pos & /*pos*/, ASTPtr & node, Expected & /*expected*/)
{
node = std::make_shared<ASTLiteral>(Field(UInt64(1)));
node = std::make_shared<ASTLiteral>(Field(static_cast<UInt64>(1)));
return true;
}

bool ParserAlwaysFalse::parseImpl(IParser::Pos & /*pos*/, ASTPtr & node, Expected & /*expected*/)
{
node = std::make_shared<ASTLiteral>(Field(UInt64(0)));
node = std::make_shared<ASTLiteral>(Field(static_cast<UInt64>(0)));
return true;
}
@@ -391,7 +391,7 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)

/// Transform `DISTINCT ON expr` to `LIMIT 1 BY expr`
limit_by_expression_list = distinct_on_expression_list;
limit_by_length = std::make_shared<ASTLiteral>(Field{UInt8(1)});
limit_by_length = std::make_shared<ASTLiteral>(Field{static_cast<UInt8>(1)});
distinct_on_expression_list = nullptr;
}
@@ -30,9 +30,9 @@ bool ParserSetQuery::parseNameValuePair(SettingChange & change, IParser::Pos & p
return false;

if (ParserKeyword("TRUE").ignore(pos, expected))
value = std::make_shared<ASTLiteral>(Field(UInt64(1)));
value = std::make_shared<ASTLiteral>(Field(static_cast<UInt64>(1)));
else if (ParserKeyword("FALSE").ignore(pos, expected))
value = std::make_shared<ASTLiteral>(Field(UInt64(0)));
value = std::make_shared<ASTLiteral>(Field(static_cast<UInt64>(0)));
else if (!value_p.parse(pos, value, expected))
return false;
@@ -22,9 +22,9 @@ ASTPtr makeASTForLogicalAnd(ASTs && arguments)
});

if (!partial_result)
return std::make_shared<ASTLiteral>(Field{UInt8(0)});
return std::make_shared<ASTLiteral>(Field{static_cast<UInt8>(0)});
if (arguments.empty())
return std::make_shared<ASTLiteral>(Field{UInt8(1)});
return std::make_shared<ASTLiteral>(Field{static_cast<UInt8>(1)});
if (arguments.size() == 1)
return arguments[0];

@@ -51,9 +51,9 @@ ASTPtr makeASTForLogicalOr(ASTs && arguments)
});

if (partial_result)
return std::make_shared<ASTLiteral>(Field{UInt8(1)});
return std::make_shared<ASTLiteral>(Field{static_cast<UInt8>(1)});
if (arguments.empty())
return std::make_shared<ASTLiteral>(Field{UInt8(0)});
return std::make_shared<ASTLiteral>(Field{static_cast<UInt8>(0)});
if (arguments.size() == 1)
return arguments[0];
@@ -9,9 +9,9 @@
#include <Parsers/ParserQueryWithOutput.h>
#include <Parsers/formatAST.h>
#include <Parsers/parseQuery.h>

#include "Access/AccessEntityIO.h"
#include <string_view>

#include <regex>
#include <gtest/gtest.h>

namespace
@@ -20,6 +20,7 @@ using namespace DB;
using namespace std::literals;
}

struct ParserTestCase
{
const std::string_view input_text;
@@ -48,9 +49,32 @@ TEST_P(ParserTest, parseQuery)

if (expected_ast)
{
ASTPtr ast;
ASSERT_NO_THROW(ast = parseQuery(*parser, input_text.begin(), input_text.end(), 0, 0));
EXPECT_EQ(expected_ast, serializeAST(*ast->clone(), false));
if (std::string(expected_ast).starts_with("throws"))
{
EXPECT_THROW(parseQuery(*parser, input_text.begin(), input_text.end(), 0, 0), DB::Exception);
}
else
{
ASTPtr ast;
ASSERT_NO_THROW(ast = parseQuery(*parser, input_text.begin(), input_text.end(), 0, 0));
if (std::string("CREATE USER or ALTER USER query") != parser->getName()
&& std::string("ATTACH access entity query") != parser->getName())
{
EXPECT_EQ(expected_ast, serializeAST(*ast->clone(), false));
}
else
{
if (input_text.starts_with("ATTACH"))
{
auto salt = (dynamic_cast<const ASTCreateUserQuery *>(ast.get())->auth_data)->getSalt();
EXPECT_TRUE(std::regex_match(salt, std::regex(expected_ast)));
}
else
{
EXPECT_TRUE(std::regex_match(serializeAST(*ast->clone(), false), std::regex(expected_ast)));
}
}
}
}
else
{
@@ -226,3 +250,35 @@ INSTANTIATE_TEST_SUITE_P(ParserCreateDatabaseQuery, ParserTest,
"CREATE DATABASE db\nENGINE = Foo\nSETTINGS a = 1, b = 2\nTABLE OVERRIDE `a`\n(\n ORDER BY (`id`, `version`)\n)\nCOMMENT 'db comment'"
}
})));

INSTANTIATE_TEST_SUITE_P(ParserCreateUserQuery, ParserTest,
::testing::Combine(
::testing::Values(std::make_shared<ParserCreateUserQuery>()),
::testing::ValuesIn(std::initializer_list<ParserTestCase>{
{
"CREATE USER user1 IDENTIFIED WITH sha256_password BY 'qwe123'",
"CREATE USER user1 IDENTIFIED WITH sha256_hash BY '[A-Za-z0-9]{64}' SALT '[A-Za-z0-9]{64}'"
},
{
"ALTER USER user1 IDENTIFIED WITH sha256_password BY 'qwe123'",
"ALTER USER user1 IDENTIFIED WITH sha256_hash BY '[A-Za-z0-9]{64}' SALT '[A-Za-z0-9]{64}'"
},
{
"CREATE USER user1 IDENTIFIED WITH sha256_password BY 'qwe123' SALT 'EFFD7F6B03B3EA68B8F86C1E91614DD50E42EB31EF7160524916444D58B5E264'",
"throws Syntax error"
}
})));

INSTANTIATE_TEST_SUITE_P(ParserAttachUserQuery, ParserTest,
::testing::Combine(
::testing::Values(std::make_shared<ParserAttachAccessEntity>()),
::testing::ValuesIn(std::initializer_list<ParserTestCase>{
{
"ATTACH USER user1 IDENTIFIED WITH sha256_hash BY '2CC4880302693485717D34E06046594CFDFE425E3F04AA5A094C4AABAB3CB0BF' SALT 'EFFD7F6B03B3EA68B8F86C1E91614DD50E42EB31EF7160524916444D58B5E264';",
"^[A-Za-z0-9]{64}$"
},
{
"ATTACH USER user1 IDENTIFIED WITH sha256_hash BY '2CC4880302693485717D34E06046594CFDFE425E3F04AA5A094C4AABAB3CB0BF'", //for users created in older releases that sha256_password has no salt
"^$"
}
})));
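Editorial note: a sketch of what the sha256_hash value matched by '[A-Za-z0-9]{64}' in the tests above could look like, assuming (as the parser changes suggest) that the stored digest is SHA-256 over the plain-text password with the generated salt appended. The salt below is the example value from the test cases, and OpenSSL's one-shot SHA256() is used for brevity:

#include <openssl/sha.h>
#include <cstdio>
#include <string>

int main()
{
    std::string password = "qwe123";
    std::string salt = "EFFD7F6B03B3EA68B8F86C1E91614DD50E42EB31EF7160524916444D58B5E264";
    std::string material = password + salt;

    unsigned char digest[SHA256_DIGEST_LENGTH];
    SHA256(reinterpret_cast<const unsigned char *>(material.data()), material.size(), digest);

    // 32 digest bytes printed as 64 hex characters, i.e. the shape the
    // '[A-Za-z0-9]{64}' expectation matches.
    for (unsigned char byte : digest)
        std::printf("%02X", byte);
    std::printf("\n");
}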
@@ -14,10 +14,12 @@ namespace ErrorCodes
extern const int CANNOT_PARSE_QUOTED_STRING;
extern const int CANNOT_PARSE_DATE;
extern const int CANNOT_PARSE_DATETIME;
extern const int CANNOT_READ_ARRAY_FROM_TEXT;
extern const int CANNOT_READ_ALL_DATA;
extern const int CANNOT_PARSE_NUMBER;
extern const int CANNOT_PARSE_BOOL;
extern const int CANNOT_PARSE_UUID;
extern const int CANNOT_READ_ARRAY_FROM_TEXT;
extern const int CANNOT_READ_MAP_FROM_TEXT;
extern const int CANNOT_READ_ALL_DATA;
extern const int TOO_LARGE_STRING_SIZE;
extern const int INCORRECT_NUMBER_OF_COLUMNS;
extern const int ARGUMENT_OUT_OF_BOUND;
@@ -32,9 +34,11 @@ bool isParseError(int code)
|| code == ErrorCodes::CANNOT_PARSE_QUOTED_STRING
|| code == ErrorCodes::CANNOT_PARSE_DATE
|| code == ErrorCodes::CANNOT_PARSE_DATETIME
|| code == ErrorCodes::CANNOT_READ_ARRAY_FROM_TEXT
|| code == ErrorCodes::CANNOT_PARSE_NUMBER
|| code == ErrorCodes::CANNOT_PARSE_UUID
|| code == ErrorCodes::CANNOT_PARSE_BOOL
|| code == ErrorCodes::CANNOT_READ_ARRAY_FROM_TEXT
|| code == ErrorCodes::CANNOT_READ_MAP_FROM_TEXT
|| code == ErrorCodes::CANNOT_READ_ALL_DATA
|| code == ErrorCodes::TOO_LARGE_STRING_SIZE
|| code == ErrorCodes::ARGUMENT_OUT_OF_BOUND /// For Decimals