diff --git a/.github/ISSUE_TEMPLATE/40_bug-report.md b/.github/ISSUE_TEMPLATE/40_bug-report.md
index 4dfd19266d0..5c8611d47e6 100644
--- a/.github/ISSUE_TEMPLATE/40_bug-report.md
+++ b/.github/ISSUE_TEMPLATE/40_bug-report.md
@@ -7,15 +7,29 @@ assignees: ''
 ---
 
-(you don't have to strictly follow this form)
+You have to provide the following information whenever possible.
 
 **Describe the bug**
+
 A clear and concise description of what is not working as expected.
 
 **Does it reproduce on recent release?**
+
 [The list of releases](https://github.com/ClickHouse/ClickHouse/blob/master/utils/list-versions/version_date.tsv)
 
+**Enable crash reporting**
+
+If possible, change "enabled" to true in the "send_crash_reports" section in `config.xml`:
+
+```
+<send_crash_reports>
+    <enabled>false</enabled>
+</send_crash_reports>
+```
+
 **How to reproduce**
+
 * Which ClickHouse server version to use
 * Which interface to use, if it matters
 * Non-default settings, if any
@@ -24,10 +38,13 @@ A clear and concise description of what is not working as expected.
 * Queries to run that lead to unexpected result
 
 **Expected behavior**
+
 A clear and concise description of what you expected to happen.
 
 **Error message and/or stacktrace**
+
 If applicable, add screenshots to help explain your problem.
 
 **Additional context**
+
 Add any other context about the problem here.
diff --git a/.gitignore b/.gitignore
index d33dbf0600d..1db6e0a78c9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,6 +27,7 @@
 /docs/zh/single.md
 /docs/ja/single.md
 /docs/fa/single.md
+/docs/en/development/cmake-in-clickhouse.md
 
 # callgrind files
 callgrind.out.*
diff --git a/.gitmodules b/.gitmodules
index f9bc8a56a5c..2ccce88e5e4 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -17,6 +17,7 @@
 [submodule "contrib/zlib-ng"]
 	path = contrib/zlib-ng
 	url = https://github.com/ClickHouse-Extras/zlib-ng.git
+	branch = clickhouse-new
 [submodule "contrib/googletest"]
 	path = contrib/googletest
 	url = https://github.com/google/googletest.git
@@ -133,7 +134,7 @@
 	url = https://github.com/unicode-org/icu.git
 [submodule "contrib/flatbuffers"]
 	path = contrib/flatbuffers
-	url = https://github.com/google/flatbuffers.git
+	url = https://github.com/ClickHouse-Extras/flatbuffers.git
 [submodule "contrib/libc-headers"]
 	path = contrib/libc-headers
 	url = https://github.com/ClickHouse-Extras/libc-headers.git
@@ -221,3 +222,13 @@
 [submodule "contrib/NuRaft"]
 	path = contrib/NuRaft
 	url = https://github.com/ClickHouse-Extras/NuRaft.git
+[submodule "contrib/nanodbc"]
+	path = contrib/nanodbc
+	url = https://github.com/ClickHouse-Extras/nanodbc.git
+[submodule "contrib/datasketches-cpp"]
+	path = contrib/datasketches-cpp
+	url = https://github.com/ClickHouse-Extras/datasketches-cpp.git
+
+[submodule "contrib/yaml-cpp"]
+	path = contrib/yaml-cpp
+	url = https://github.com/ClickHouse-Extras/yaml-cpp.git
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 43531b60267..2eaecaa4c9b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,295 @@
+## ClickHouse release 21.5, 2021-05-20
+
+#### Backward Incompatible Change
+
+* Change comparison of integers and floating point numbers when the integer is not exactly representable in the floating point data type. In the new version the comparison will return false, as a rounding error occurs. Example: `9223372036854775808.0 != 9223372036854775808`, because the number `9223372036854775808` is not representable as a floating point number exactly (and `9223372036854775808.0` is rounded to `9223372036854776000.0`). But in previous versions the comparison returned true, because when the floating point number `9223372036854776000.0` gets converted back to UInt64, it yields `9223372036854775808`. For the reference, the Python programming language also treats these numbers as equal. But this behaviour was dependent on the CPU model (different results on AMD64 and AArch64 for some out-of-range numbers), so we made the comparison more precise. It will treat int and float numbers as equal only if the int is exactly representable in the floating point type. [#22595](https://github.com/ClickHouse/ClickHouse/pull/22595) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Remove support for `argMin` and `argMax` for single `Tuple` argument. The code was not memory-safe. The feature was added by mistake and it is confusing for people. These functions can be reintroduced under different names later. This fixes [#22384](https://github.com/ClickHouse/ClickHouse/issues/22384) and reverts [#17359](https://github.com/ClickHouse/ClickHouse/issues/17359). [#23393](https://github.com/ClickHouse/ClickHouse/pull/23393) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+
+#### New Feature
+
+* Added functions `dictGetChildren(dictionary, key)`, `dictGetDescendants(dictionary, key, level)`. Function `dictGetChildren` returns all children as an array of indexes. It is an inverse transformation for `dictGetHierarchy`. Function `dictGetDescendants` returns all descendants as if `dictGetChildren` was applied `level` times recursively. Zero `level` value is equivalent to infinity. Improved performance of `dictGetHierarchy`, `dictIsIn` functions. Closes [#14656](https://github.com/ClickHouse/ClickHouse/issues/14656). [#22096](https://github.com/ClickHouse/ClickHouse/pull/22096) ([Maksim Kita](https://github.com/kitaisreal)).
+* Added function `dictGetOrNull`. It works like `dictGet`, but returns `Null` if the key was not found in the dictionary. Closes [#22375](https://github.com/ClickHouse/ClickHouse/issues/22375). [#22413](https://github.com/ClickHouse/ClickHouse/pull/22413) ([Maksim Kita](https://github.com/kitaisreal)).
+* Added a table function `s3Cluster`, which allows processing files from `s3` in parallel on every node of a specified cluster. [#22012](https://github.com/ClickHouse/ClickHouse/pull/22012) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Added support for replicas and shards in MySQL/PostgreSQL table engine / table function. You can write `SELECT * FROM mysql('host{1,2}-{1|2}', ...)`. Closes [#20969](https://github.com/ClickHouse/ClickHouse/issues/20969). [#22217](https://github.com/ClickHouse/ClickHouse/pull/22217) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Added `ALTER TABLE ... FETCH PART ...` query. It's similar to `FETCH PARTITION`, but fetches only one part. [#22706](https://github.com/ClickHouse/ClickHouse/pull/22706) ([turbo jason](https://github.com/songenjie)).
+* Added a setting `max_distributed_depth` that limits the depth of recursive queries to `Distributed` tables. Closes [#20229](https://github.com/ClickHouse/ClickHouse/issues/20229). [#21942](https://github.com/ClickHouse/ClickHouse/pull/21942) ([flynn](https://github.com/ucasFL)).
+
+#### Performance Improvement
+
+* Improved performance of `intDiv` by dynamic dispatch for AVX2. This closes [#22314](https://github.com/ClickHouse/ClickHouse/issues/22314). [#23000](https://github.com/ClickHouse/ClickHouse/pull/23000) ([alexey-milovidov](https://github.com/alexey-milovidov)).
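+
+A minimal sketch of the comparison change described in the Backward Incompatible Change section above (an illustrative query added editorially, not part of the original release notes):
+
+```sql
+-- The changelog's own example pair: per the entry above, since 21.5
+-- these values compare as NOT equal (the comparison is now exact).
+SELECT 9223372036854775808.0 != 9223372036854775808 AS not_equal;  -- 1 in 21.5, 0 before
+```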
+* Improved performance of reading from `ArrowStream` input format for sources other than a local file (e.g. URL). [#22673](https://github.com/ClickHouse/ClickHouse/pull/22673) ([nvartolomei](https://github.com/nvartolomei)).
+* Disabled compression by default when interacting with localhost (with clickhouse-client or server to server with distributed queries) via native protocol. It may improve performance of some import/export operations. This closes [#22234](https://github.com/ClickHouse/ClickHouse/issues/22234). [#22237](https://github.com/ClickHouse/ClickHouse/pull/22237) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Exclude values that do not belong to the shard from the right part of the IN section for distributed queries (under `optimize_skip_unused_shards_rewrite_in`, enabled by default, since it still requires `optimize_skip_unused_shards`). [#21511](https://github.com/ClickHouse/ClickHouse/pull/21511) ([Azat Khuzhin](https://github.com/azat)).
+* Improved performance of reading a subset of columns with File-like table engine and column-oriented format like Parquet, Arrow or ORC. This closes [#20129](https://github.com/ClickHouse/ClickHouse/issues/20129). [#21302](https://github.com/ClickHouse/ClickHouse/pull/21302) ([keenwolf](https://github.com/keen-wolf)).
+* Allow to move more conditions to `PREWHERE` as it was before version 21.1 (adjustment of internal heuristics). An insufficient number of moved conditions could lead to worse performance. [#23397](https://github.com/ClickHouse/ClickHouse/pull/23397) ([Anton Popov](https://github.com/CurtizJ)).
+* Improved performance of ODBC connections and fixed all the outstanding issues from the backlog. Using the `nanodbc` library instead of `Poco::ODBC`. Closes [#9678](https://github.com/ClickHouse/ClickHouse/issues/9678). Add support for DateTime64 and Decimal* for ODBC table engine. Closes [#21961](https://github.com/ClickHouse/ClickHouse/issues/21961). Fixed issue with Cyrillic text being truncated. Closes [#16246](https://github.com/ClickHouse/ClickHouse/issues/16246). Added connection pools for the ODBC bridge. [#21972](https://github.com/ClickHouse/ClickHouse/pull/21972) ([Kseniia Sumarokova](https://github.com/kssenii)).
+
+#### Improvement
+
+* Increase `max_uri_size` (the maximum size of URL in HTTP interface) to 1 MiB by default. This closes [#21197](https://github.com/ClickHouse/ClickHouse/issues/21197). [#22997](https://github.com/ClickHouse/ClickHouse/pull/22997) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Set `background_fetches_pool_size` to `8`, which is better for production usage with frequent small insertions or a slow ZooKeeper cluster. [#22945](https://github.com/ClickHouse/ClickHouse/pull/22945) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Added `initial_array_size` and `max_array_size` options to `FlatDictionary`. [#22521](https://github.com/ClickHouse/ClickHouse/pull/22521) ([Maksim Kita](https://github.com/kitaisreal)).
+* Add new setting `non_replicated_deduplication_window` for non-replicated MergeTree inserts deduplication. [#22514](https://github.com/ClickHouse/ClickHouse/pull/22514) ([alesapin](https://github.com/alesapin)).
+* Update paths to the `CatBoost` model configs in config reloading. [#22434](https://github.com/ClickHouse/ClickHouse/pull/22434) ([Kruglov Pavel](https://github.com/Avogar)).
+* Added `Decimal256` type support in dictionaries. `Decimal256` is an experimental feature. Closes [#20979](https://github.com/ClickHouse/ClickHouse/issues/20979). [#22960](https://github.com/ClickHouse/ClickHouse/pull/22960) ([Maksim Kita](https://github.com/kitaisreal)).
+* Enabled `async_socket_for_remote` by default (using fewer OS threads for distributed queries). [#23683](https://github.com/ClickHouse/ClickHouse/pull/23683) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fixed `quantile(s)TDigest`. Added special handling of singleton centroids according to tdunning/t-digest 3.2+. Also fixed a bug with over-compression of centroids in the implementation of an earlier version of the algorithm. [#23314](https://github.com/ClickHouse/ClickHouse/pull/23314) ([Vladimir Chebotarev](https://github.com/excitoon)).
+* Make function name `unhex` case insensitive for compatibility with MySQL. [#23229](https://github.com/ClickHouse/ClickHouse/pull/23229) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Implement functions `arrayHasAny`, `arrayHasAll`, `has`, `indexOf`, `countEqual` for the generic case when types of array elements are different. In previous versions the functions `arrayHasAny`, `arrayHasAll` returned false and `has`, `indexOf`, `countEqual` threw an exception. Also add support for `Decimal` and big integer types in functions `has` and similar. This closes [#20272](https://github.com/ClickHouse/ClickHouse/issues/20272). [#23044](https://github.com/ClickHouse/ClickHouse/pull/23044) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Raised the threshold on max number of matches in result of the function `extractAllGroupsHorizontal`. [#23036](https://github.com/ClickHouse/ClickHouse/pull/23036) ([Vasily Nemkov](https://github.com/Enmk)).
+* Do not perform `optimize_skip_unused_shards` for a cluster with one node. [#22999](https://github.com/ClickHouse/ClickHouse/pull/22999) ([Azat Khuzhin](https://github.com/azat)).
+* Added ability to run clickhouse-keeper (experimental drop-in replacement to ZooKeeper) with SSL. Config settings `keeper_server.tcp_port_secure` can be used for secure interaction between client and keeper-server. `keeper_server.raft_configuration.secure` can be used to enable internal secure communication between nodes. [#22992](https://github.com/ClickHouse/ClickHouse/pull/22992) ([alesapin](https://github.com/alesapin)).
+* Added ability to flush buffer only in background for `Buffer` tables. [#22986](https://github.com/ClickHouse/ClickHouse/pull/22986) ([Azat Khuzhin](https://github.com/azat)).
+* When selecting from a MergeTree table with NULL in the WHERE condition, in rare cases, an exception was thrown. This closes [#20019](https://github.com/ClickHouse/ClickHouse/issues/20019). [#22978](https://github.com/ClickHouse/ClickHouse/pull/22978) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Fix error handling in Poco HTTP Client for AWS. [#22973](https://github.com/ClickHouse/ClickHouse/pull/22973) ([kreuzerkrieg](https://github.com/kreuzerkrieg)).
+* Respect `max_part_removal_threads` for `ReplicatedMergeTree`. [#22971](https://github.com/ClickHouse/ClickHouse/pull/22971) ([Azat Khuzhin](https://github.com/azat)).
+* Fix an obscure corner case of MergeTree settings `inactive_parts_to_throw_insert = 0` with `inactive_parts_to_delay_insert > 0`. [#22947](https://github.com/ClickHouse/ClickHouse/pull/22947) ([Azat Khuzhin](https://github.com/azat)).
+* `dateDiff` now works with `DateTime64` arguments (even for values outside of the `DateTime` range). [#22931](https://github.com/ClickHouse/ClickHouse/pull/22931) ([Vasily Nemkov](https://github.com/Enmk)).
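+
+A small illustrative query for the `dateDiff` entry just above (an editorial sketch; the values are hypothetical and the result depends on the timezone):
+
+```sql
+-- DateTime64 arguments may lie outside the DateTime range, which starts at 1970:
+SELECT dateDiff('year',
+                toDateTime64('1925-01-01 00:00:00', 3),
+                toDateTime64('2021-01-01 00:00:00', 3)) AS years;
+```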
+* MaterializeMySQL (experimental feature): added the ability to replicate MySQL databases containing views without failing. This is accomplished by ignoring the views. [#22760](https://github.com/ClickHouse/ClickHouse/pull/22760) ([Christian](https://github.com/cfroystad)).
+* Allow RBAC row policy via the PostgreSQL protocol. Closes [#22658](https://github.com/ClickHouse/ClickHouse/issues/22658). PostgreSQL protocol is enabled in configuration by default. [#22755](https://github.com/ClickHouse/ClickHouse/pull/22755) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Add metric to track how much time is spent waiting for the Buffer layer lock. [#22725](https://github.com/ClickHouse/ClickHouse/pull/22725) ([Azat Khuzhin](https://github.com/azat)).
+* Allow to use CTE in VIEW definition. This closes [#22491](https://github.com/ClickHouse/ClickHouse/issues/22491). [#22657](https://github.com/ClickHouse/ClickHouse/pull/22657) ([Amos Bird](https://github.com/amosbird)).
+* Clear the rest of the screen and show cursor in `clickhouse-client` if the previous program left garbage in the terminal. This closes [#16518](https://github.com/ClickHouse/ClickHouse/issues/16518). [#22634](https://github.com/ClickHouse/ClickHouse/pull/22634) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Make the `round` function behave consistently on non-x86_64 platforms. Rounding half to nearest even (Banker's rounding) is used. [#22582](https://github.com/ClickHouse/ClickHouse/pull/22582) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Correctly check the structure of blocks of data that are sent by Distributed tables. [#22325](https://github.com/ClickHouse/ClickHouse/pull/22325) ([Azat Khuzhin](https://github.com/azat)).
+* Allow publishing Kafka errors to a virtual column of Kafka engine, controlled by the `kafka_handle_error_mode` setting. [#21850](https://github.com/ClickHouse/ClickHouse/pull/21850) ([fastio](https://github.com/fastio)).
+* Add aliases `simpleJSONExtract/simpleJSONHas` to `visitParam/visitParamExtract{UInt, Int, Bool, Float, Raw, String}`. Fixes [#21383](https://github.com/ClickHouse/ClickHouse/issues/21383). [#21519](https://github.com/ClickHouse/ClickHouse/pull/21519) ([fastio](https://github.com/fastio)).
+* Add `clickhouse-library-bridge` for library dictionary source. Closes [#9502](https://github.com/ClickHouse/ClickHouse/issues/9502). [#21509](https://github.com/ClickHouse/ClickHouse/pull/21509) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Forbid dropping a column if it's referenced by a materialized view. Closes [#21164](https://github.com/ClickHouse/ClickHouse/issues/21164). [#21303](https://github.com/ClickHouse/ClickHouse/pull/21303) ([flynn](https://github.com/ucasFL)).
+* Support dynamic interserver credentials (rotating credentials without downtime). [#14113](https://github.com/ClickHouse/ClickHouse/pull/14113) ([johnskopis](https://github.com/johnskopis)).
+* Add support for Kafka storage with `Arrow` and `ArrowStream` format messages. [#23415](https://github.com/ClickHouse/ClickHouse/pull/23415) ([Chao Ma](https://github.com/godliness)).
+* Fixed missing semicolon in exception message. The user may find this exception message unpleasant to read. [#23208](https://github.com/ClickHouse/ClickHouse/pull/23208) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Fixed missing whitespace in some exception messages about `LowCardinality` type. [#23207](https://github.com/ClickHouse/ClickHouse/pull/23207) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Some values were formatted with alignment in center in table cells in `Markdown` format. Not anymore. [#23096](https://github.com/ClickHouse/ClickHouse/pull/23096) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Remove non-essential details from suggestions in clickhouse-client. This closes [#22158](https://github.com/ClickHouse/ClickHouse/issues/22158). [#23040](https://github.com/ClickHouse/ClickHouse/pull/23040) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Correct calculation of `bytes_allocated` field in system.dictionaries for sparse_hashed dictionaries. [#22867](https://github.com/ClickHouse/ClickHouse/pull/22867) ([Azat Khuzhin](https://github.com/azat)).
+* Fixed approximate total rows accounting for reverse reading from MergeTree. [#22726](https://github.com/ClickHouse/ClickHouse/pull/22726) ([Azat Khuzhin](https://github.com/azat)).
+* Fixed the case when it was possible to configure a dictionary with a ClickHouse source that pointed to itself, which led to an infinite loop. Closes [#14314](https://github.com/ClickHouse/ClickHouse/issues/14314). [#22479](https://github.com/ClickHouse/ClickHouse/pull/22479) ([Maksim Kita](https://github.com/kitaisreal)).
+
+#### Bug Fix
+
+* Multiple fixes for hedged requests. Fixed an error `Can't initialize pipeline with empty pipe` for queries with `GLOBAL IN/JOIN` when the setting `use_hedged_requests` is enabled. Fixes [#23431](https://github.com/ClickHouse/ClickHouse/issues/23431). [#23805](https://github.com/ClickHouse/ClickHouse/pull/23805) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). Fixed a race condition in hedged connections which leads to crash. This fixes [#22161](https://github.com/ClickHouse/ClickHouse/issues/22161). [#22443](https://github.com/ClickHouse/ClickHouse/pull/22443) ([Kruglov Pavel](https://github.com/Avogar)). Fix possible crash in case an `unknown packet` was received from a remote query (with `async_socket_for_remote` enabled). Fixes [#21167](https://github.com/ClickHouse/ClickHouse/issues/21167). [#23309](https://github.com/ClickHouse/ClickHouse/pull/23309) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fixed the behavior when disabling the `input_format_with_names_use_header` setting discards all the input with CSVWithNames format. This fixes [#22406](https://github.com/ClickHouse/ClickHouse/issues/22406). [#23202](https://github.com/ClickHouse/ClickHouse/pull/23202) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Fixed remote JDBC bridge timeout connection issue. Closes [#9609](https://github.com/ClickHouse/ClickHouse/issues/9609). [#23771](https://github.com/ClickHouse/ClickHouse/pull/23771) ([Maksim Kita](https://github.com/kitaisreal), [alexey-milovidov](https://github.com/alexey-milovidov)).
+* Fix the logic of initial load of `complex_key_hashed` if `update_field` is specified. Closes [#23800](https://github.com/ClickHouse/ClickHouse/issues/23800). [#23824](https://github.com/ClickHouse/ClickHouse/pull/23824) ([Maksim Kita](https://github.com/kitaisreal)).
+* Fixed crash when `PREWHERE` and row policy filter are both in effect with empty result. [#23763](https://github.com/ClickHouse/ClickHouse/pull/23763) ([Amos Bird](https://github.com/amosbird)).
+* Avoid possible "Cannot schedule a task" error (in case an exception occurred) on INSERT into Distributed. [#23744](https://github.com/ClickHouse/ClickHouse/pull/23744) ([Azat Khuzhin](https://github.com/azat)).
+* Added an exception for the case of completely identical values in both samples in aggregate function `mannWhitneyUTest`. This fixes [#23646](https://github.com/ClickHouse/ClickHouse/issues/23646). [#23654](https://github.com/ClickHouse/ClickHouse/pull/23654) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Fixed server fault when inserting data through HTTP caused an exception. This fixes [#23512](https://github.com/ClickHouse/ClickHouse/issues/23512). [#23643](https://github.com/ClickHouse/ClickHouse/pull/23643) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Fixed misinterpretation of some `LIKE` expressions with escape sequences. [#23610](https://github.com/ClickHouse/ClickHouse/pull/23610) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Fixed restart / stop command hanging. Closes [#20214](https://github.com/ClickHouse/ClickHouse/issues/20214). [#23552](https://github.com/ClickHouse/ClickHouse/pull/23552) ([filimonov](https://github.com/filimonov)).
+* Fixed `COLUMNS` matcher in case of multiple JOINs in select query. Closes [#22736](https://github.com/ClickHouse/ClickHouse/issues/22736). [#23501](https://github.com/ClickHouse/ClickHouse/pull/23501) ([Maksim Kita](https://github.com/kitaisreal)).
+* Fixed a crash when modifying a column's default value when the column itself is used as `ReplacingMergeTree`'s parameter. [#23483](https://github.com/ClickHouse/ClickHouse/pull/23483) ([hexiaoting](https://github.com/hexiaoting)).
+* Fixed corner cases in vertical merges with `ReplacingMergeTree`. In rare cases they could lead to failures of merges with exceptions like `Incomplete granules are not allowed while blocks are granules size`. [#23459](https://github.com/ClickHouse/ClickHouse/pull/23459) ([Anton Popov](https://github.com/CurtizJ)).
+* Fixed a bug that did not allow casting from an empty array literal to an array with dimensions greater than 1, e.g. `CAST([] AS Array(Array(String)))`. Closes [#14476](https://github.com/ClickHouse/ClickHouse/issues/14476). [#23456](https://github.com/ClickHouse/ClickHouse/pull/23456) ([Maksim Kita](https://github.com/kitaisreal)).
+* Fixed a bug when `deltaSum` aggregate function produced incorrect result after resetting the counter. [#23437](https://github.com/ClickHouse/ClickHouse/pull/23437) ([Russ Frank](https://github.com/rf)).
+* Fixed `Cannot unlink file` error on unsuccessful creation of ReplicatedMergeTree table with multidisk configuration. This closes [#21755](https://github.com/ClickHouse/ClickHouse/issues/21755). [#23433](https://github.com/ClickHouse/ClickHouse/pull/23433) ([tavplubix](https://github.com/tavplubix)).
+* Fixed incompatible constant expression generation during partition pruning based on virtual columns. This fixes https://github.com/ClickHouse/ClickHouse/pull/21401#discussion_r611888913. [#23366](https://github.com/ClickHouse/ClickHouse/pull/23366) ([Amos Bird](https://github.com/amosbird)).
+* Fixed a crash when the setting `join_algorithm` is set to `'auto'` and a Join is performed with a Dictionary. Closes [#23002](https://github.com/ClickHouse/ClickHouse/issues/23002). [#23312](https://github.com/ClickHouse/ClickHouse/pull/23312) ([Vladimir](https://github.com/vdimir)).
+* Don't relax NOT conditions during partition pruning. This fixes [#23305](https://github.com/ClickHouse/ClickHouse/issues/23305) and [#21539](https://github.com/ClickHouse/ClickHouse/issues/21539). [#23310](https://github.com/ClickHouse/ClickHouse/pull/23310) ([Amos Bird](https://github.com/amosbird)).
+* Fixed very rare race condition on background cleanup of old blocks. It might cause a block not to be deduplicated if it's too close to the end of the deduplication window. [#23301](https://github.com/ClickHouse/ClickHouse/pull/23301) ([tavplubix](https://github.com/tavplubix)).
+* Fixed very rare (distributed) race condition between creation and removal of ReplicatedMergeTree tables. It might cause exceptions like `node doesn't exist` on attempt to create a replicated table. Fixes [#21419](https://github.com/ClickHouse/ClickHouse/issues/21419). [#23294](https://github.com/ClickHouse/ClickHouse/pull/23294) ([tavplubix](https://github.com/tavplubix)).
+* Fixed creation of a simple-key dictionary from DDL if the primary key is not the first attribute. Fixes [#23236](https://github.com/ClickHouse/ClickHouse/issues/23236). [#23262](https://github.com/ClickHouse/ClickHouse/pull/23262) ([Maksim Kita](https://github.com/kitaisreal)).
+* Fixed reading from ODBC when there are many long column names in a table. Closes [#8853](https://github.com/ClickHouse/ClickHouse/issues/8853). [#23215](https://github.com/ClickHouse/ClickHouse/pull/23215) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* MaterializeMySQL (experimental feature): fixed `Not found column` error when selecting from `MaterializeMySQL` with condition on key column. Fixes [#22432](https://github.com/ClickHouse/ClickHouse/issues/22432). [#23200](https://github.com/ClickHouse/ClickHouse/pull/23200) ([tavplubix](https://github.com/tavplubix)).
+* Correct alias handling if a subquery was optimized to a constant. Fixes [#22924](https://github.com/ClickHouse/ClickHouse/issues/22924). Fixes [#10401](https://github.com/ClickHouse/ClickHouse/issues/10401). [#23191](https://github.com/ClickHouse/ClickHouse/pull/23191) ([Maksim Kita](https://github.com/kitaisreal)).
+* Server might fail to start if the `data_type_default_nullable` setting is enabled in the default profile; it's fixed now. Fixes [#22573](https://github.com/ClickHouse/ClickHouse/issues/22573). [#23185](https://github.com/ClickHouse/ClickHouse/pull/23185) ([tavplubix](https://github.com/tavplubix)).
+* Fixed a crash on shutdown which happened because of wrong accounting of current connections. [#23154](https://github.com/ClickHouse/ClickHouse/pull/23154) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Fixed `Table .inner_id... doesn't exist` error when selecting from Materialized View after detaching it from Atomic database and attaching back. [#23047](https://github.com/ClickHouse/ClickHouse/pull/23047) ([tavplubix](https://github.com/tavplubix)).
+* Fix error `Cannot find column in ActionsDAG result` which may happen if subquery uses `untuple`. Fixes [#22290](https://github.com/ClickHouse/ClickHouse/issues/22290). [#22991](https://github.com/ClickHouse/ClickHouse/pull/22991) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix usage of constant columns of type `Map` with nullable values. [#22939](https://github.com/ClickHouse/ClickHouse/pull/22939) ([Anton Popov](https://github.com/CurtizJ)).
+* Fixed `formatDateTime()` on `DateTime64` and the `%C` format specifier; fixed `toDateTime64()` for large values and non-zero scale. [#22937](https://github.com/ClickHouse/ClickHouse/pull/22937) ([Vasily Nemkov](https://github.com/Enmk)).
+* Fixed a crash when using `mannWhitneyUTest` and `rankCorr` with window functions. This fixes [#22728](https://github.com/ClickHouse/ClickHouse/issues/22728). [#22876](https://github.com/ClickHouse/ClickHouse/pull/22876) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* LIVE VIEW (experimental feature): fixed possible hanging in concurrent DROP/CREATE of TEMPORARY LIVE VIEW in `TemporaryLiveViewCleaner`, [see](https://gist.github.com/vzakaznikov/0c03195960fc86b56bfe2bc73a90019e). [#22858](https://github.com/ClickHouse/ClickHouse/pull/22858) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Fixed pushdown of `HAVING` in the case when the filter column is used in aggregation. [#22763](https://github.com/ClickHouse/ClickHouse/pull/22763) ([Anton Popov](https://github.com/CurtizJ)).
+* Fixed possible hangs in ZooKeeper requests in case of OOM exception. Fixes [#22438](https://github.com/ClickHouse/ClickHouse/issues/22438). [#22684](https://github.com/ClickHouse/ClickHouse/pull/22684) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fixed wait for mutations on several replicas for ReplicatedMergeTree table engines. Previously, a mutation/alter query could finish before the mutation was actually executed on other replicas. [#22669](https://github.com/ClickHouse/ClickHouse/pull/22669) ([alesapin](https://github.com/alesapin)).
+* Fixed exception for Log with nested types without columns in the SELECT clause. [#22654](https://github.com/ClickHouse/ClickHouse/pull/22654) ([Azat Khuzhin](https://github.com/azat)).
+* Fix unlimited wait for auxiliary AWS requests. [#22594](https://github.com/ClickHouse/ClickHouse/pull/22594) ([Vladimir Chebotarev](https://github.com/excitoon)).
+* Fixed a crash when the client closes the connection very early [#22579](https://github.com/ClickHouse/ClickHouse/issues/22579). [#22591](https://github.com/ClickHouse/ClickHouse/pull/22591) ([nvartolomei](https://github.com/nvartolomei)).
+* `Map` data type (experimental feature): fixed an incorrect formatting of function `map` in distributed queries. [#22588](https://github.com/ClickHouse/ClickHouse/pull/22588) ([foolchi](https://github.com/foolchi)).
+* Fixed deserialization of empty string without newline at end of TSV format. This closes [#20244](https://github.com/ClickHouse/ClickHouse/issues/20244). Possible workaround without version update: set `input_format_null_as_default` to zero. It was zero in old versions. [#22527](https://github.com/ClickHouse/ClickHouse/pull/22527) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Fixed wrong cast of a column of `LowCardinality` type in Merge Join algorithm. Closes [#22386](https://github.com/ClickHouse/ClickHouse/issues/22386), closes [#22388](https://github.com/ClickHouse/ClickHouse/issues/22388). [#22510](https://github.com/ClickHouse/ClickHouse/pull/22510) ([Vladimir](https://github.com/vdimir)).
+* Buffer overflow (on read) was possible in `tokenbf_v1` full text index. The excessive bytes are not used but the read operation may lead to a crash in rare cases. This closes [#19233](https://github.com/ClickHouse/ClickHouse/issues/19233). [#22421](https://github.com/ClickHouse/ClickHouse/pull/22421) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Do not limit HTTP chunk size. Fixes [#21907](https://github.com/ClickHouse/ClickHouse/issues/21907). [#22322](https://github.com/ClickHouse/ClickHouse/pull/22322) ([Ivan](https://github.com/abyss7)).
+* Fixed a bug which led to underaggregation of data when `optimize_aggregation_in_order` is enabled and the table has many parts. Slightly improved performance of aggregation with enabled `optimize_aggregation_in_order`. [#21889](https://github.com/ClickHouse/ClickHouse/pull/21889) ([Anton Popov](https://github.com/CurtizJ)).
+* Check if table function view is used as a column. This complements #20350. [#21465](https://github.com/ClickHouse/ClickHouse/pull/21465) ([Amos Bird](https://github.com/amosbird)).
+* Fix "unknown column" error for tables with `Merge` engine in queries with `JOIN` and aggregation. Closes [#18368](https://github.com/ClickHouse/ClickHouse/issues/18368), closes [#22226](https://github.com/ClickHouse/ClickHouse/issues/22226). [#21370](https://github.com/ClickHouse/ClickHouse/pull/21370) ([Vladimir](https://github.com/vdimir)).
+* Fixed name clashes in pushdown optimization. It caused incorrect `WHERE` filtration after FULL JOIN. Closes [#20497](https://github.com/ClickHouse/ClickHouse/issues/20497). [#20622](https://github.com/ClickHouse/ClickHouse/pull/20622) ([Vladimir](https://github.com/vdimir)).
+* Fixed very rare bug when quorum insert with `quorum_parallel=1` is not really "quorum" because of deduplication. [#18215](https://github.com/ClickHouse/ClickHouse/pull/18215) ([filimonov](https://github.com/filimonov) - reported, [alesapin](https://github.com/alesapin) - fixed).
+
+#### Build/Testing/Packaging Improvement
+
+* Run stateless tests in parallel in CI. [#22300](https://github.com/ClickHouse/ClickHouse/pull/22300) ([alesapin](https://github.com/alesapin)).
+* Simplify Debian packages. This fixes [#21698](https://github.com/ClickHouse/ClickHouse/issues/21698). [#22976](https://github.com/ClickHouse/ClickHouse/pull/22976) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Added support for ClickHouse build on Apple M1. [#21639](https://github.com/ClickHouse/ClickHouse/pull/21639) ([changvvb](https://github.com/changvvb)).
+* Fixed ClickHouse Keeper build for macOS. [#22860](https://github.com/ClickHouse/ClickHouse/pull/22860) ([alesapin](https://github.com/alesapin)).
+* Fixed some tests on AArch64 platform. [#22596](https://github.com/ClickHouse/ClickHouse/pull/22596) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Added function alignment for possibly better performance. [#21431](https://github.com/ClickHouse/ClickHouse/pull/21431) ([Danila Kutenin](https://github.com/danlark1)).
+* Adjust some tests to output identical results on amd64 and aarch64 (qemu). The result depended on implementation-specific CPU behaviour. [#22590](https://github.com/ClickHouse/ClickHouse/pull/22590) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Allow query profiling only on x86_64. See [#15174](https://github.com/ClickHouse/ClickHouse/issues/15174#issuecomment-812954965) and [#15638](https://github.com/ClickHouse/ClickHouse/issues/15638#issuecomment-703805337). This closes [#15638](https://github.com/ClickHouse/ClickHouse/issues/15638). [#22580](https://github.com/ClickHouse/ClickHouse/pull/22580) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Allow building with unbundled xz (lzma) using `USE_INTERNAL_XZ_LIBRARY=OFF` CMake option. [#22571](https://github.com/ClickHouse/ClickHouse/pull/22571) ([Kfir Itzhak](https://github.com/mastertheknife)).
+* Enable bundled `openldap` on `ppc64le`. [#22487](https://github.com/ClickHouse/ClickHouse/pull/22487) ([Kfir Itzhak](https://github.com/mastertheknife)).
+* Disable incompatible libraries (typically platform specific) on `ppc64le`. [#22475](https://github.com/ClickHouse/ClickHouse/pull/22475) ([Kfir Itzhak](https://github.com/mastertheknife)).
+* Add Jepsen test in CI for ClickHouse Keeper. [#22373](https://github.com/ClickHouse/ClickHouse/pull/22373) ([alesapin](https://github.com/alesapin)).
+* Build `jemalloc` with support for [heap profiling](https://github.com/jemalloc/jemalloc/wiki/Use-Case%3A-Heap-Profiling). [#22834](https://github.com/ClickHouse/ClickHouse/pull/22834) ([nvartolomei](https://github.com/nvartolomei)).
+* Avoid UB in `*Log` engines for rwlock unlock due to unlock from another thread. [#22583](https://github.com/ClickHouse/ClickHouse/pull/22583) ([Azat Khuzhin](https://github.com/azat)).
+* Fixed UB by unlocking the rwlock of the TinyLog from the same thread. [#22560](https://github.com/ClickHouse/ClickHouse/pull/22560) ([Azat Khuzhin](https://github.com/azat)).
+
+
+## ClickHouse release 21.4
+
+### ClickHouse release 21.4.1 2021-04-12
+
+#### Backward Incompatible Change
+
+* The `toStartOfInterval` function will align hour intervals to midnight (in previous versions they were aligned to the start of the unix epoch). For example, `toStartOfInterval(x, INTERVAL 11 HOUR)` will split every day into three intervals: `00:00:00..10:59:59`, `11:00:00..21:59:59` and `22:00:00..23:59:59`. This behaviour is more suited for practical needs. This closes [#9510](https://github.com/ClickHouse/ClickHouse/issues/9510). [#22060](https://github.com/ClickHouse/ClickHouse/pull/22060) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* `Age` and `Precision` in graphite rollup configs should increase from retention to retention. Now it's checked and the wrong config raises an exception. [#21496](https://github.com/ClickHouse/ClickHouse/pull/21496) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Fix `cutToFirstSignificantSubdomainCustom()`/`firstSignificantSubdomainCustom()` returning wrong result for 3+ level domains present in custom top-level domain list. For input domains matching these custom top-level domains, the third-level domain was considered to be the first significant one. This is now fixed. This change may introduce incompatibility if the function is used in e.g. the sharding key. [#21946](https://github.com/ClickHouse/ClickHouse/pull/21946) ([Azat Khuzhin](https://github.com/azat)).
+* Column `keys` in table `system.dictionaries` was replaced by columns `key.names` and `key.types`. Columns `key.names`, `key.types`, `attribute.names`, `attribute.types` from the `system.dictionaries` table do not require the dictionary to be loaded. [#21884](https://github.com/ClickHouse/ClickHouse/pull/21884) ([Maksim Kita](https://github.com/kitaisreal)).
+* Now replicas that are processing the `ALTER TABLE ATTACH PART[ITION]` command search in their `detached/` folders before fetching the data from other replicas. As an implementation detail, a new command `ATTACH_PART` is introduced in the replicated log. Parts are searched and compared by their checksums. [#18978](https://github.com/ClickHouse/ClickHouse/pull/18978) ([Mike Kot](https://github.com/myrrc)). **Note**:
+    * `ATTACH PART[ITION]` queries may not work during cluster upgrade.
+    * It's not possible to rollback to an older ClickHouse version after executing an `ALTER ... ATTACH` query in the new version, as the old servers would fail to pass the `ATTACH_PART` entry in the replicated log.
+* In this version, an empty `<remote_url_allow_hosts>` element will block all access to remote hosts, while in previous versions it did nothing. If you want to keep the old behaviour and you have an empty `remote_url_allow_hosts` element in your configuration file, remove it. [#20058](https://github.com/ClickHouse/ClickHouse/pull/20058) ([Vladimir Chebotarev](https://github.com/excitoon)).
+
+
+#### New Feature
+
+* Extended range of `DateTime64` to support dates from year 1925 to 2283. Improved support of `DateTime` around zero date (`1970-01-01`). [#9404](https://github.com/ClickHouse/ClickHouse/pull/9404) ([alexey-milovidov](https://github.com/alexey-milovidov), [Vasily Nemkov](https://github.com/Enmk)). Not all time and date functions work for the extended range of dates.
+* Added support of Kerberos authentication for preconfigured users and HTTP requests (GSS-SPNEGO). [#14995](https://github.com/ClickHouse/ClickHouse/pull/14995) ([Denis Glazachev](https://github.com/traceon)).
+* Add `prefer_column_name_to_alias` setting to use original column names instead of aliases. It is needed to be more compatible with common databases' aliasing rules. This is for [#9715](https://github.com/ClickHouse/ClickHouse/issues/9715) and [#9887](https://github.com/ClickHouse/ClickHouse/issues/9887). [#22044](https://github.com/ClickHouse/ClickHouse/pull/22044) ([Amos Bird](https://github.com/amosbird)).
+* Added functions `dictGetChildren(dictionary, key)`, `dictGetDescendants(dictionary, key, level)`. Function `dictGetChildren` returns all children as an array of indexes. It is an inverse transformation for `dictGetHierarchy`. Function `dictGetDescendants` returns all descendants as if `dictGetChildren` was applied `level` times recursively. Zero `level` value is equivalent to infinity. Closes [#14656](https://github.com/ClickHouse/ClickHouse/issues/14656). [#22096](https://github.com/ClickHouse/ClickHouse/pull/22096) ([Maksim Kita](https://github.com/kitaisreal)).
+* Added `executable_pool` dictionary source. Closes [#14528](https://github.com/ClickHouse/ClickHouse/issues/14528). [#21321](https://github.com/ClickHouse/ClickHouse/pull/21321) ([Maksim Kita](https://github.com/kitaisreal)).
+* Added table function `dictionary`. It works the same way as the `Dictionary` engine. Closes [#21560](https://github.com/ClickHouse/ClickHouse/issues/21560). [#21910](https://github.com/ClickHouse/ClickHouse/pull/21910) ([Maksim Kita](https://github.com/kitaisreal)).
+* Support `Nullable` type for `PolygonDictionary` attribute. [#21890](https://github.com/ClickHouse/ClickHouse/pull/21890) ([Maksim Kita](https://github.com/kitaisreal)).
+* Functions `dictGet`, `dictHas` use the current database name if it is not specified for dictionaries created with DDL. Closes [#21632](https://github.com/ClickHouse/ClickHouse/issues/21632). [#21859](https://github.com/ClickHouse/ClickHouse/pull/21859) ([Maksim Kita](https://github.com/kitaisreal)).
+* Added function `dictGetOrNull`. It works like `dictGet`, but returns `Null` if the key was not found in the dictionary. Closes [#22375](https://github.com/ClickHouse/ClickHouse/issues/22375). [#22413](https://github.com/ClickHouse/ClickHouse/pull/22413) ([Maksim Kita](https://github.com/kitaisreal)).
+* Added async update in `ComplexKeyCache`, `SSDCache`, `SSDComplexKeyCache` dictionaries. Added support for `Nullable` type in `Cache`, `ComplexKeyCache`, `SSDCache`, `SSDComplexKeyCache` dictionaries. Added support for multiple attributes fetch with `dictGet`, `dictGetOrDefault` functions. Fixes [#21517](https://github.com/ClickHouse/ClickHouse/issues/21517). [#20595](https://github.com/ClickHouse/ClickHouse/pull/20595) ([Maksim Kita](https://github.com/kitaisreal)).
+* Support `dictHas` function for `RangeHashedDictionary`. Fixes [#6680](https://github.com/ClickHouse/ClickHouse/issues/6680). [#19816](https://github.com/ClickHouse/ClickHouse/pull/19816) ([Maksim Kita](https://github.com/kitaisreal)).
+* Add function `timezoneOf` that returns the timezone name of `DateTime` or `DateTime64` data types. This does not close [#9959](https://github.com/ClickHouse/ClickHouse/issues/9959). Fix inconsistencies in function names: add aliases `timezone` and `timeZone` as well as `toTimezone` and `toTimeZone` and `timezoneOf` and `timeZoneOf`. [#22001](https://github.com/ClickHouse/ClickHouse/pull/22001) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Add new optional clause `GRANTEES` for `CREATE/ALTER USER` commands. It specifies users or roles which are allowed to receive grants from this user on condition this user has also all required access granted with grant option. By default `GRANTEES ANY` is used which means a user with grant option can grant to anyone. Syntax: `CREATE USER ... GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]`. [#21641](https://github.com/ClickHouse/ClickHouse/pull/21641) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Add new column `slowdowns_count` to `system.clusters`. When using hedged requests, it shows how many times we switched to another replica because this replica was responding slowly. Also show the actual value of `errors_count` in `system.clusters`. [#21480](https://github.com/ClickHouse/ClickHouse/pull/21480) ([Kruglov Pavel](https://github.com/Avogar)).
+* Add `_partition_id` virtual column for `MergeTree*` engines. Allow to prune partitions by `_partition_id`. Add `partitionID()` function to calculate partition id string. [#21401](https://github.com/ClickHouse/ClickHouse/pull/21401) ([Amos Bird](https://github.com/amosbird)).
+* Add function `isIPAddressInRange` to test if an IPv4 or IPv6 address is contained in a given CIDR network prefix. [#21329](https://github.com/ClickHouse/ClickHouse/pull/21329) ([PHO](https://github.com/depressed-pho)).
+* Added new SQL command `ALTER TABLE 'table_name' UNFREEZE [PARTITION 'part_expr'] WITH NAME 'backup_name'`. This command is needed to properly remove 'frozen' partitions from all disks. [#21142](https://github.com/ClickHouse/ClickHouse/pull/21142) ([Pavel Kovalenko](https://github.com/Jokser)).
+* Support implicit key type conversion for JOIN. [#19885](https://github.com/ClickHouse/ClickHouse/pull/19885) ([Vladimir](https://github.com/vdimir)).
+
+#### Experimental Feature
+
+* Support `RANGE OFFSET` frame (for window functions) for floating point types. Implement `lagInFrame`/`leadInFrame` window functions, which are analogous to `lag`/`lead`, but respect the window frame. They are identical when the frame is `between unbounded preceding and unbounded following`. This closes [#5485](https://github.com/ClickHouse/ClickHouse/issues/5485). [#21895](https://github.com/ClickHouse/ClickHouse/pull/21895) ([Alexander Kuzmenkov](https://github.com/akuzm)).
+* Zero-copy replication for `ReplicatedMergeTree` over S3 storage. [#16240](https://github.com/ClickHouse/ClickHouse/pull/16240) ([ianton-ru](https://github.com/ianton-ru)).
+* Added possibility to migrate existing S3 disk to the schema with backup-restore capabilities. [#22070](https://github.com/ClickHouse/ClickHouse/pull/22070) ([Pavel Kovalenko](https://github.com/Jokser)).
+
+#### Performance Improvement
+
+* Supported parallel formatting in `clickhouse-local` and everywhere else. [#21630](https://github.com/ClickHouse/ClickHouse/pull/21630) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Support parallel parsing for `CSVWithNames` and `TSVWithNames` formats. This closes [#21085](https://github.com/ClickHouse/ClickHouse/issues/21085). [#21149](https://github.com/ClickHouse/ClickHouse/pull/21149) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Enable read with mmap IO for file ranges from 64 MiB (the setting `min_bytes_to_use_mmap_io`). It may lead to moderate performance improvement. [#22326](https://github.com/ClickHouse/ClickHouse/pull/22326) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Add cache for files read with `min_bytes_to_use_mmap_io` setting. It makes significant (2x and more) performance improvement when the value of the setting is small by avoiding frequent mmap/munmap calls and the consequent page faults. Note that mmap IO has major drawbacks that make it less reliable in production (e.g. hung or SIGBUS on faulty disks; less controllable memory usage). Nevertheless it is good in benchmarks. [#22206](https://github.com/ClickHouse/ClickHouse/pull/22206) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Avoid unnecessary data copy when using codec `NONE`. Please note that codec `NONE` is mostly useless - it's recommended to always use compression (`LZ4` is by default). Despite the common belief, disabling compression may not improve performance (the opposite effect is possible). The `NONE` codec is useful in some cases: - when data is incompressible; - for synthetic benchmarks. [#22145](https://github.com/ClickHouse/ClickHouse/pull/22145) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Faster `GROUP BY` with small `max_rows_to_group_by` and `group_by_overflow_mode='any'`. [#21856](https://github.com/ClickHouse/ClickHouse/pull/21856) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Optimize performance of queries like `SELECT ... FINAL ... WHERE`. Now in queries with `FINAL` it's allowed to move columns that are in the sorting key to `PREWHERE`. [#21830](https://github.com/ClickHouse/ClickHouse/pull/21830) ([foolchi](https://github.com/foolchi)).
+* Improved performance by replacing `memcpy` with another implementation. This closes [#18583](https://github.com/ClickHouse/ClickHouse/issues/18583). [#21520](https://github.com/ClickHouse/ClickHouse/pull/21520) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Improve performance of aggregation in order of sorting key (with enabled setting `optimize_aggregation_in_order`). [#19401](https://github.com/ClickHouse/ClickHouse/pull/19401) ([Anton Popov](https://github.com/CurtizJ)).
+
+#### Improvement
+
+* Add connection pool for PostgreSQL table/database engine and dictionary source. Should fix [#21444](https://github.com/ClickHouse/ClickHouse/issues/21444). [#21839](https://github.com/ClickHouse/ClickHouse/pull/21839) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Support non-default table schema for postgres storage/table-function. Closes [#21701](https://github.com/ClickHouse/ClickHouse/issues/21701). [#21711](https://github.com/ClickHouse/ClickHouse/pull/21711) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Support replicas priority for postgres dictionary source. [#21710](https://github.com/ClickHouse/ClickHouse/pull/21710) ([Kseniia Sumarokova](https://github.com/kssenii)).
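+
+A sketch of the non-default schema support mentioned two entries above (an editorial illustration; all connection parameters here are hypothetical):
+
+```sql
+-- The trailing argument selects a non-default PostgreSQL schema:
+SELECT *
+FROM postgresql('localhost:5432', 'mydb', 'mytable', 'user', 'password', 'accounting');
+```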
+* Introduce a new merge tree setting `min_bytes_to_rebalance_partition_over_jbod` which allows assigning new parts to different disks of a JBOD volume in a balanced way. [#16481](https://github.com/ClickHouse/ClickHouse/pull/16481) ([Amos Bird](https://github.com/amosbird)).
+* Added `Grant`, `Revoke` and `System` values of `query_kind` column for corresponding queries in `system.query_log`. [#21102](https://github.com/ClickHouse/ClickHouse/pull/21102) ([Vasily Nemkov](https://github.com/Enmk)).
+* Allow customizing timeouts for HTTP connections used for replication independently from other HTTP timeouts. [#20088](https://github.com/ClickHouse/ClickHouse/pull/20088) ([nvartolomei](https://github.com/nvartolomei)).
+* Better exception message in client in case of exception while server is writing blocks. In previous versions the client might get a misleading message like `Data compressed with different methods`. [#22427](https://github.com/ClickHouse/ClickHouse/pull/22427) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Fix error `Directory tmp_fetch_XXX already exists` which could happen after a failed part fetch. Delete the temporary fetch directory if it already exists. Fixes [#14197](https://github.com/ClickHouse/ClickHouse/issues/14197). [#22411](https://github.com/ClickHouse/ClickHouse/pull/22411) ([nvartolomei](https://github.com/nvartolomei)).
+* Fix MSan report for function `range` with `UInt256` argument (support for large integers is experimental). This closes [#22157](https://github.com/ClickHouse/ClickHouse/issues/22157). [#22387](https://github.com/ClickHouse/ClickHouse/pull/22387) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Add `current_database` column to `system.processes` table. It contains the current database of the query. [#22365](https://github.com/ClickHouse/ClickHouse/pull/22365) ([Alexander Kuzmenkov](https://github.com/akuzm)).
+* Add case-insensitive history search/navigation and subword movement features to `clickhouse-client`. [#22105](https://github.com/ClickHouse/ClickHouse/pull/22105) ([Amos Bird](https://github.com/amosbird)).
+* If a tuple of NULLs, e.g. `(NULL, NULL)`, is on the left hand side of the `IN` operator with tuples of non-NULLs on the right hand side, e.g. `SELECT (NULL, NULL) IN ((0, 0), (3, 1))`, return 0 instead of throwing an exception about incompatible types. The expression may also appear due to optimization of something like `SELECT (NULL, NULL) = (8, 0) OR (NULL, NULL) = (3, 2) OR (NULL, NULL) = (0, 0) OR (NULL, NULL) = (3, 1)`. This closes [#22017](https://github.com/ClickHouse/ClickHouse/issues/22017). [#22063](https://github.com/ClickHouse/ClickHouse/pull/22063) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Update used version of simdjson to 0.9.1. This fixes [#21984](https://github.com/ClickHouse/ClickHouse/issues/21984). [#22057](https://github.com/ClickHouse/ClickHouse/pull/22057) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Added case insensitive aliases for `CONNECTION_ID()` and `VERSION()` functions. This fixes [#22028](https://github.com/ClickHouse/ClickHouse/issues/22028). [#22042](https://github.com/ClickHouse/ClickHouse/pull/22042) ([Eugene Klimov](https://github.com/Slach)).
+* Add option `strict_increase` to `windowFunnel` function to calculate each event once (resolves [#21835](https://github.com/ClickHouse/ClickHouse/issues/21835)). [#22025](https://github.com/ClickHouse/ClickHouse/pull/22025) ([Vladimir](https://github.com/vdimir)).
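+
+A sketch of the new `strict_increase` mode just above (an editorial example; the table and column names are hypothetical):
+
+```sql
+-- Count each event at most once within the funnel chain:
+SELECT user_id,
+       windowFunnel(3600, 'strict_increase')(ts, event = 'login', event = 'search', event = 'buy') AS level
+FROM funnel_events
+GROUP BY user_id;
+```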
+* If the partition key of a `MergeTree` table does not include `Date` or `DateTime` columns but includes exactly one `DateTime64` column, expose its values in the `min_time` and `max_time` columns in `system.parts` and `system.parts_columns` tables. Add `min_time` and `max_time` columns to the `system.parts_columns` table (previously there was an inconsistency with the `system.parts` table). This closes [#18244](https://github.com/ClickHouse/ClickHouse/issues/18244). [#22011](https://github.com/ClickHouse/ClickHouse/pull/22011) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Supported `replication_alter_partitions_sync=1` setting in `clickhouse-copier` for moving partitions from the helping table to the destination. Decreased default timeouts. Fixes [#21911](https://github.com/ClickHouse/ClickHouse/issues/21911). [#21912](https://github.com/ClickHouse/ClickHouse/pull/21912) ([turbo jason](https://github.com/songenjie)).
+* Show path to data directory of `EmbeddedRocksDB` tables in system tables. [#21903](https://github.com/ClickHouse/ClickHouse/pull/21903) ([tavplubix](https://github.com/tavplubix)).
+* Add profile event `HedgedRequestsChangeReplica`; changed the read data timeout from seconds to milliseconds. [#21886](https://github.com/ClickHouse/ClickHouse/pull/21886) ([Kruglov Pavel](https://github.com/Avogar)).
+* DiskS3 (experimental feature under development). Fixed a bug that made it impossible to move a directory if the destination is not empty and a cache disk is used. [#21837](https://github.com/ClickHouse/ClickHouse/pull/21837) ([Pavel Kovalenko](https://github.com/Jokser)).
+* Better formatting for `Array` and `Map` data types in Web UI. [#21798](https://github.com/ClickHouse/ClickHouse/pull/21798) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Update clusters only if their configurations were updated. [#21685](https://github.com/ClickHouse/ClickHouse/pull/21685) ([Kruglov Pavel](https://github.com/Avogar)).
+* Propagate query and session settings for distributed DDL queries. Set `distributed_ddl_entry_format_version` to 2 to enable this. Added `distributed_ddl_output_mode` setting. Supported modes: `none`, `throw` (default), `null_status_on_timeout` and `never_throw`. Miscellaneous fixes and improvements for the `Replicated` database engine. [#21535](https://github.com/ClickHouse/ClickHouse/pull/21535) ([tavplubix](https://github.com/tavplubix)).
+* If `PODArray` was instantiated with an element size that is neither a fraction nor a multiple of 16, buffer overflow was possible. No bugs in current releases exist. [#21533](https://github.com/ClickHouse/ClickHouse/pull/21533) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Add `last_error_time`/`last_error_message`/`last_error_stacktrace`/`remote` columns for `system.errors`. [#21529](https://github.com/ClickHouse/ClickHouse/pull/21529) ([Azat Khuzhin](https://github.com/azat)).
+* Add aliases `simpleJSONExtract/simpleJSONHas` to `visitParam/visitParamExtract{UInt, Int, Bool, Float, Raw, String}`. Fixes #21383. [#21519](https://github.com/ClickHouse/ClickHouse/pull/21519) ([fastio](https://github.com/fastio)).
+* Add setting `optimize_skip_unused_shards_limit` to limit the number of sharding key values for `optimize_skip_unused_shards`. [#21512](https://github.com/ClickHouse/ClickHouse/pull/21512) ([Azat Khuzhin](https://github.com/azat)).
+* Improve `clickhouse-format` to not throw an exception when there are extra spaces or a comment after the last query, and to throw an exception early with a readable message when formatting `ASTInsertQuery` with data. [#21311](https://github.com/ClickHouse/ClickHouse/pull/21311) ([flynn](https://github.com/ucasFL)).
+* Improve support of integer keys in data type `Map` (see the sketch after this group of entries). [#21157](https://github.com/ClickHouse/ClickHouse/pull/21157) ([Anton Popov](https://github.com/CurtizJ)).
+* MaterializeMySQL: attempt to reconnect to MySQL if the connection is lost. [#20961](https://github.com/ClickHouse/ClickHouse/pull/20961) ([Håvard Kvålen](https://github.com/havardk)).
+* Support more cases to rewrite `CROSS JOIN` to `INNER JOIN`. [#20392](https://github.com/ClickHouse/ClickHouse/pull/20392) ([Vladimir](https://github.com/vdimir)).
+* Do not create empty parts on INSERT when the `optimize_on_insert` setting is enabled. Fixes [#20304](https://github.com/ClickHouse/ClickHouse/issues/20304). [#20387](https://github.com/ClickHouse/ClickHouse/pull/20387) ([Kruglov Pavel](https://github.com/Avogar)).
+* `MaterializeMySQL`: add minmax skipping index for `_version` column. [#20382](https://github.com/ClickHouse/ClickHouse/pull/20382) ([Stig Bakken](https://github.com/stigsb)).
+* Add option `--backslash` for `clickhouse-format`, which can add a backslash at the end of each line of the formatted query. [#21494](https://github.com/ClickHouse/ClickHouse/pull/21494) ([flynn](https://github.com/ucasFL)).
+* Now ClickHouse will not throw a `LOGICAL_ERROR` exception when we try to mutate an already covered part. Fixes [#22013](https://github.com/ClickHouse/ClickHouse/issues/22013). [#22291](https://github.com/ClickHouse/ClickHouse/pull/22291) ([alesapin](https://github.com/alesapin)).
+
+#### Bug Fix
+
+* Remove socket from epoll before cancelling packet receiver in `HedgedConnections` to prevent possible race. Fixes [#22161](https://github.com/ClickHouse/ClickHouse/issues/22161). [#22443](https://github.com/ClickHouse/ClickHouse/pull/22443) ([Kruglov Pavel](https://github.com/Avogar)).
+* Add (missing) memory accounting in parallel parsing routines. In previous versions OOM was possible when the result set contained very large blocks of data. This closes [#22008](https://github.com/ClickHouse/ClickHouse/issues/22008). [#22425](https://github.com/ClickHouse/ClickHouse/pull/22425) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Fix exception which may happen when `SELECT` has a constant `WHERE` condition and the source table has columns whose names are digits. [#22270](https://github.com/ClickHouse/ClickHouse/pull/22270) ([LiuNeng](https://github.com/liuneng1994)).
+* Fix query cancellation with `use_hedged_requests=0` and `async_socket_for_remote=1`. [#22183](https://github.com/ClickHouse/ClickHouse/pull/22183) ([Azat Khuzhin](https://github.com/azat)).
+* Fix uncaught exception in `InterserverIOHTTPHandler`. [#22146](https://github.com/ClickHouse/ClickHouse/pull/22146) ([Azat Khuzhin](https://github.com/azat)).
+* Fix docker entrypoint in case `http_port` is not in the config. [#22132](https://github.com/ClickHouse/ClickHouse/pull/22132) ([Ewout](https://github.com/devwout)).
+* Fix error `Invalid number of rows in Chunk` in `JOIN` with `TOTALS` and `arrayJoin`. Closes [#19303](https://github.com/ClickHouse/ClickHouse/issues/19303). [#22129](https://github.com/ClickHouse/ClickHouse/pull/22129) ([Vladimir](https://github.com/vdimir)).
+* Fix the background thread pool name which is used to poll messages from Kafka. The Kafka engine with the broken thread pool would not consume messages from the message queue. [#22122](https://github.com/ClickHouse/ClickHouse/pull/22122) ([fastio](https://github.com/fastio)).
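+
+As referenced above, a short sketch of integer keys in `Map` (an editorial illustration, not from the original notes):
+
+```sql
+-- Build a Map with integer keys and access an element by key:
+SELECT map(1, 'one', 2, 'two') AS m, m[2] AS v;
+```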
+* Fix waiting for `OPTIMIZE` and `ALTER` queries for `ReplicatedMergeTree` table engines. Now the query will not hang when the table is detached or restarted. [#22118](https://github.com/ClickHouse/ClickHouse/pull/22118) ([alesapin](https://github.com/alesapin)).
+* Disable `async_socket_for_remote`/`use_hedged_requests` for buggy Linux kernels. [#22109](https://github.com/ClickHouse/ClickHouse/pull/22109) ([Azat Khuzhin](https://github.com/azat)).
+* Docker entrypoint: avoid `chown` of `.` when `LOG_PATH` is empty. Closes [#22100](https://github.com/ClickHouse/ClickHouse/issues/22100). [#22102](https://github.com/ClickHouse/ClickHouse/pull/22102) ([filimonov](https://github.com/filimonov)).
+* The function `decrypt` was lacking a check for the minimal size of data encrypted in `AEAD` mode. This closes [#21897](https://github.com/ClickHouse/ClickHouse/issues/21897). [#22064](https://github.com/ClickHouse/ClickHouse/pull/22064) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* In rare cases, a merge for `CollapsingMergeTree` could create a granule with `index_granularity + 1` rows. Because of this, an internal check added in [#18928](https://github.com/ClickHouse/ClickHouse/issues/18928) (affects 21.2 and 21.3) could fail with the error `Incomplete granules are not allowed while blocks are granules size`. This error prevented parts from merging. [#21976](https://github.com/ClickHouse/ClickHouse/pull/21976) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Reverted [#15454](https://github.com/ClickHouse/ClickHouse/issues/15454), which could cause a significant increase in memory usage while loading external dictionaries of hashed type. This closes [#21935](https://github.com/ClickHouse/ClickHouse/issues/21935). [#21948](https://github.com/ClickHouse/ClickHouse/pull/21948) ([Maksim Kita](https://github.com/kitaisreal)).
+* Prevent overlapping hedged connections (the `Unknown packet 9 from server` error). [#21941](https://github.com/ClickHouse/ClickHouse/pull/21941) ([Azat Khuzhin](https://github.com/azat)).
+* Fix reading an HTTP POST request with the "multipart/form-data" content type in some cases. [#21936](https://github.com/ClickHouse/ClickHouse/pull/21936) ([Ivan](https://github.com/abyss7)).
+* Fix wrong `ORDER BY` results when a query contains window functions and the optimization for reading in primary key order is applied. Fixes [#21828](https://github.com/ClickHouse/ClickHouse/issues/21828). [#21915](https://github.com/ClickHouse/ClickHouse/pull/21915) ([Alexander Kuzmenkov](https://github.com/akuzm)).
+* Fix a deadlock in the first CatBoost model execution. Closes [#13832](https://github.com/ClickHouse/ClickHouse/issues/13832). [#21844](https://github.com/ClickHouse/ClickHouse/pull/21844) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix an incorrect query result (and a possible crash) which could happen when a `WHERE` or `HAVING` condition is pushed down before `GROUP BY`. Fixes [#21773](https://github.com/ClickHouse/ClickHouse/issues/21773). [#21841](https://github.com/ClickHouse/ClickHouse/pull/21841) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Better error handling and logging in `WriteBufferFromS3`. [#21836](https://github.com/ClickHouse/ClickHouse/pull/21836) ([Pavel Kovalenko](https://github.com/Jokser)).
+* Fix possible crashes in aggregate functions with the `Distinct` combinator when using two-level aggregation. This is a follow-up to [#18365](https://github.com/ClickHouse/ClickHouse/pull/18365). It could only be reproduced in a production environment.
[#21818](https://github.com/ClickHouse/ClickHouse/pull/21818) ([Amos Bird](https://github.com/amosbird)).
+* Fix scalar subquery index analysis. This fixes [#21717](https://github.com/ClickHouse/ClickHouse/issues/21717), which was introduced in [#18896](https://github.com/ClickHouse/ClickHouse/pull/18896). [#21766](https://github.com/ClickHouse/ClickHouse/pull/21766) ([Amos Bird](https://github.com/amosbird)).
+* Fix a bug for `ReplicatedMerge` table engines where an `ALTER MODIFY COLUMN` query doesn't change the type of a `Decimal` column if its size (32 bit or 64 bit) doesn't change. [#21728](https://github.com/ClickHouse/ClickHouse/pull/21728) ([alesapin](https://github.com/alesapin)).
+* Fix possible infinite waiting when concurrent `OPTIMIZE` and `DROP` are run for `ReplicatedMergeTree`. [#21716](https://github.com/ClickHouse/ClickHouse/pull/21716) ([Azat Khuzhin](https://github.com/azat)).
+* Fix the function `arrayElement` with type `Map` for constant integer arguments. [#21699](https://github.com/ClickHouse/ClickHouse/pull/21699) ([Anton Popov](https://github.com/CurtizJ)).
+* Fix SIGSEGV on non-existing attributes from `ip_trie` with `access_to_key_from_attributes`. [#21692](https://github.com/ClickHouse/ClickHouse/pull/21692) ([Azat Khuzhin](https://github.com/azat)).
+* The server now starts accepting connections only after `DDLWorker` and dictionary initialization. [#21676](https://github.com/ClickHouse/ClickHouse/pull/21676) ([Azat Khuzhin](https://github.com/azat)).
+* Add type conversion for keys of tables of type `Join` (previously this led to SIGSEGV). [#21646](https://github.com/ClickHouse/ClickHouse/pull/21646) ([Azat Khuzhin](https://github.com/azat)).
+* Fix cancellation of distributed requests (for example, a simple select from multiple shards with a limit, i.e. `select * from remote('127.{2,3}', system.numbers) limit 100`) with `async_socket_for_remote=1`. [#21643](https://github.com/ClickHouse/ClickHouse/pull/21643) ([Azat Khuzhin](https://github.com/azat)).
+* Fix `fsync_part_directory` for horizontal merge. [#21642](https://github.com/ClickHouse/ClickHouse/pull/21642) ([Azat Khuzhin](https://github.com/azat)).
+* Remove unknown columns from the joined table in `WHERE` for queries to external database engines (MySQL, PostgreSQL). Closes [#14614](https://github.com/ClickHouse/ClickHouse/issues/14614), [#19288](https://github.com/ClickHouse/ClickHouse/issues/19288) (dup), and [#19645](https://github.com/ClickHouse/ClickHouse/issues/19645) (dup). [#21640](https://github.com/ClickHouse/ClickHouse/pull/21640) ([Vladimir](https://github.com/vdimir)).
+* `std::terminate` was called if there was an error writing data to S3. [#21624](https://github.com/ClickHouse/ClickHouse/pull/21624) ([Vladimir](https://github.com/vdimir)).
+* Fix the possible error `Cannot find column` when `optimize_skip_unused_shards` is enabled and zero shards are used. [#21579](https://github.com/ClickHouse/ClickHouse/pull/21579) ([Azat Khuzhin](https://github.com/azat)).
+* If a query had a constant `WHERE` condition and the setting `optimize_skip_unused_shards` was enabled, all shards could be skipped and the query could return an incorrect empty result. [#21550](https://github.com/ClickHouse/ClickHouse/pull/21550) ([Amos Bird](https://github.com/amosbird)).
+* Fix the table function `clusterAllReplicas` returning a wrong `_shard_num`. Closes [#21481](https://github.com/ClickHouse/ClickHouse/issues/21481). [#21498](https://github.com/ClickHouse/ClickHouse/pull/21498) ([flynn](https://github.com/ucasFL)).
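A quick sketch to observe the `_shard_num` fix from the item above; `my_cluster` is again a placeholder cluster name, not something defined in this changelog:

```sql
-- After the fix, each row should carry the _shard_num of the replica's own shard.
SELECT _shard_num, hostName()
FROM clusterAllReplicas('my_cluster', system.one);
```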
+* Fix S3 tables holding old credentials after a config update. [#21457](https://github.com/ClickHouse/ClickHouse/pull/21457) ([Grigory Pervakov](https://github.com/GrigoryPervakov)).
+* Fixed a race on the SSL object inside `SecureSocket` in Poco. [#21456](https://github.com/ClickHouse/ClickHouse/pull/21456) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Fix `Avro` format parsing for `Kafka`. Fixes [#21437](https://github.com/ClickHouse/ClickHouse/issues/21437). [#21438](https://github.com/ClickHouse/ClickHouse/pull/21438) ([Ilya Golshtein](https://github.com/ilejn)).
+* Fix receive and send timeouts and non-blocking read in the secure socket. [#21429](https://github.com/ClickHouse/ClickHouse/pull/21429) ([Kruglov Pavel](https://github.com/Avogar)).
+* The `force_drop_table` flag didn't work for `MATERIALIZED VIEW`; it is fixed now. Fixes [#18943](https://github.com/ClickHouse/ClickHouse/issues/18943). [#20626](https://github.com/ClickHouse/ClickHouse/pull/20626) ([tavplubix](https://github.com/tavplubix)).
+* Fix name clashes in `PredicateRewriteVisitor`. They caused incorrect `WHERE` filtering after a full join. Closes [#20497](https://github.com/ClickHouse/ClickHouse/issues/20497). [#20622](https://github.com/ClickHouse/ClickHouse/pull/20622) ([Vladimir](https://github.com/vdimir)).
+
+#### Build/Testing/Packaging Improvement
+
+* Add [Jepsen](https://github.com/jepsen-io/jepsen) tests for ClickHouse Keeper. [#21677](https://github.com/ClickHouse/ClickHouse/pull/21677) ([alesapin](https://github.com/alesapin)).
+* Run stateless tests in parallel in CI. Depends on [#22181](https://github.com/ClickHouse/ClickHouse/issues/22181). [#22300](https://github.com/ClickHouse/ClickHouse/pull/22300) ([alesapin](https://github.com/alesapin)).
+* Enable the status check for the [SQLancer](https://github.com/sqlancer/sqlancer) CI run. [#22015](https://github.com/ClickHouse/ClickHouse/pull/22015) ([Ilya Yatsishin](https://github.com/qoega)).
+* Multiple preparations for PowerPC builds: Enable the bundled openldap on `ppc64le`. [#22487](https://github.com/ClickHouse/ClickHouse/pull/22487) ([Kfir Itzhak](https://github.com/mastertheknife)). Enable compiling on `ppc64le` with Clang. [#22476](https://github.com/ClickHouse/ClickHouse/pull/22476) ([Kfir Itzhak](https://github.com/mastertheknife)). Fix compiling boost on `ppc64le`. [#22474](https://github.com/ClickHouse/ClickHouse/pull/22474) ([Kfir Itzhak](https://github.com/mastertheknife)). Fix the CMake error about the internal CMake variable `CMAKE_ASM_COMPILE_OBJECT` not being set on `ppc64le`. [#22469](https://github.com/ClickHouse/ClickHouse/pull/22469) ([Kfir Itzhak](https://github.com/mastertheknife)). Fix Fedora/RHEL/CentOS not finding `libclang_rt.builtins` on `ppc64le`. [#22458](https://github.com/ClickHouse/ClickHouse/pull/22458) ([Kfir Itzhak](https://github.com/mastertheknife)). Enable building with `jemalloc` on `ppc64le`. [#22447](https://github.com/ClickHouse/ClickHouse/pull/22447) ([Kfir Itzhak](https://github.com/mastertheknife)). Fix ClickHouse's config embedding and cctz's timezone embedding on `ppc64le`. [#22445](https://github.com/ClickHouse/ClickHouse/pull/22445) ([Kfir Itzhak](https://github.com/mastertheknife)). Fix compiling on `ppc64le` and use the correct instruction pointer register on `ppc64le`. [#22430](https://github.com/ClickHouse/ClickHouse/pull/22430) ([Kfir Itzhak](https://github.com/mastertheknife)).
+* Re-enable the S3 (AWS) library on `aarch64`.
[#22484](https://github.com/ClickHouse/ClickHouse/pull/22484) ([Kfir Itzhak](https://github.com/mastertheknife)).
+* Add `tzdata` to Docker containers because reading `ORC` formats requires it. This closes [#14156](https://github.com/ClickHouse/ClickHouse/issues/14156). [#22000](https://github.com/ClickHouse/ClickHouse/pull/22000) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Introduce two arguments for the `clickhouse-server` image Dockerfile: `deb_location` and `single_binary_location`. [#21977](https://github.com/ClickHouse/ClickHouse/pull/21977) ([filimonov](https://github.com/filimonov)).
+* Allow using clang-tidy with release builds by enabling assertions when it is used. [#21914](https://github.com/ClickHouse/ClickHouse/pull/21914) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Add llvm-12 binary names to the search in cmake scripts. Use implicit constant conversions to mute clang warnings. Updated submodules to build with CMake 3.19. Mute recursion in macro expansion in the `readpassphrase` library. The deprecated `-fuse-ld` was changed to `--ld-path` for clang. [#21597](https://github.com/ClickHouse/ClickHouse/pull/21597) ([Ilya Yatsishin](https://github.com/qoega)).
+* Update `docker/test/testflows/runner/dockerd-entrypoint.sh` to use the Yandex dockerhub-proxy, because Docker Hub has enabled very restrictive rate limits. [#21551](https://github.com/ClickHouse/ClickHouse/pull/21551) ([vzakaznikov](https://github.com/vzakaznikov)).
+* Fix the macOS shared lib build. [#20184](https://github.com/ClickHouse/ClickHouse/pull/20184) ([nvartolomei](https://github.com/nvartolomei)).
+* Add the `ctime` option to `zookeeper-dump-tree`. It allows dumping node creation time. [#21842](https://github.com/ClickHouse/ClickHouse/pull/21842) ([Ilya](https://github.com/HumanUser)).
+
 ## ClickHouse release 21.3 (LTS)
 
 ### ClickHouse release v21.3, 2021-03-12
@@ -26,7 +318,7 @@
 
 #### Experimental feature
 
 * Add experimental `Replicated` database engine. It replicates DDL queries across multiple hosts. [#16193](https://github.com/ClickHouse/ClickHouse/pull/16193) ([tavplubix](https://github.com/tavplubix)).
-* Introduce experimental support for window functions, enabled with `allow_experimental_functions = 1`. This is a preliminary, alpha-quality implementation that is not suitable for production use and will change in backward-incompatible ways in future releases. Please see [the documentation](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/sql-reference/window-functions/index.md#experimental-window-functions) for the list of supported features. [#20337](https://github.com/ClickHouse/ClickHouse/pull/20337) ([Alexander Kuzmenkov](https://github.com/akuzm)).
+* Introduce experimental support for window functions, enabled with `allow_experimental_window_functions = 1`. This is a preliminary, alpha-quality implementation that is not suitable for production use and will change in backward-incompatible ways in future releases. Please see [the documentation](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/sql-reference/window-functions/index.md#experimental-window-functions) for the list of supported features. [#20337](https://github.com/ClickHouse/ClickHouse/pull/20337) ([Alexander Kuzmenkov](https://github.com/akuzm)).
 * Add the ability to backup/restore metadata files for DiskS3. [#18377](https://github.com/ClickHouse/ClickHouse/pull/18377) ([Pavel Kovalenko](https://github.com/Jokser)).
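The hunk above corrects the setting name to `allow_experimental_window_functions`. A minimal sketch of trying the renamed setting (this is an alpha-quality feature, so syntax and behavior may differ between versions):

```sql
SET allow_experimental_window_functions = 1;
-- Running total over a tiny generated table.
SELECT number, sum(number) OVER (ORDER BY number) AS running_total
FROM numbers(5);
```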
#### Performance Improvement diff --git a/CMakeLists.txt b/CMakeLists.txt index d310f7c298c..9c62748ff95 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -36,9 +36,11 @@ option(FAIL_ON_UNSUPPORTED_OPTIONS_COMBINATION if(FAIL_ON_UNSUPPORTED_OPTIONS_COMBINATION) set(RECONFIGURE_MESSAGE_LEVEL FATAL_ERROR) else() - set(RECONFIGURE_MESSAGE_LEVEL STATUS) + set(RECONFIGURE_MESSAGE_LEVEL WARNING) endif() +enable_language(C CXX ASM) + include (cmake/arch.cmake) include (cmake/target.cmake) include (cmake/tools.cmake) @@ -66,17 +68,30 @@ endif () include (cmake/find/ccache.cmake) -option(ENABLE_CHECK_HEAVY_BUILDS "Don't allow C++ translation units to compile too long or to take too much memory while compiling" OFF) +# Take care to add prlimit in command line before ccache, or else ccache thinks that +# prlimit is compiler, and clang++ is its input file, and refuses to work with +# multiple inputs, e.g in ccache log: +# [2021-03-31T18:06:32.655327 36900] Command line: /usr/bin/ccache prlimit --as=10000000000 --data=5000000000 --cpu=600 /usr/bin/clang++-11 - ...... std=gnu++2a -MD -MT src/CMakeFiles/dbms.dir/Storages/MergeTree/IMergeTreeDataPart.cpp.o -MF src/CMakeFiles/dbms.dir/Storages/MergeTree/IMergeTreeDataPart.cpp.o.d -o src/CMakeFiles/dbms.dir/Storages/MergeTree/IMergeTreeDataPart.cpp.o -c ../src/Storages/MergeTree/IMergeTreeDataPart.cpp +# +# [2021-03-31T18:06:32.656704 36900] Multiple input files: /usr/bin/clang++-11 and ../src/Storages/MergeTree/IMergeTreeDataPart.cpp +# +# Another way would be to use --ccache-skip option before clang++-11 to make +# ccache ignore it. +option(ENABLE_CHECK_HEAVY_BUILDS "Don't allow C++ translation units to compile too long or to take too much memory while compiling." OFF) if (ENABLE_CHECK_HEAVY_BUILDS) # set DATA (since RSS does not work since 2.6.x+) to 2G set (RLIMIT_DATA 5000000000) # set VIRT (RLIMIT_AS) to 10G (DATA*10) set (RLIMIT_AS 10000000000) + # set CPU time limit to 600 seconds + set (RLIMIT_CPU 600) + # gcc10/gcc10/clang -fsanitize=memory is too heavy if (SANITIZE STREQUAL "memory" OR COMPILER_GCC) set (RLIMIT_DATA 10000000000) endif() - set (CMAKE_CXX_COMPILER_LAUNCHER prlimit --as=${RLIMIT_AS} --data=${RLIMIT_DATA} --cpu=600) + + set (CMAKE_CXX_COMPILER_LAUNCHER prlimit --as=${RLIMIT_AS} --data=${RLIMIT_DATA} --cpu=${RLIMIT_CPU} ${CMAKE_CXX_COMPILER_LAUNCHER}) endif () if (NOT CMAKE_BUILD_TYPE OR CMAKE_BUILD_TYPE STREQUAL "None") @@ -152,9 +167,10 @@ endif () # If turned `ON`, assumes the user has either the system GTest library or the bundled one. option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests" ON) +option(ENABLE_EXAMPLES "Build all example programs in 'examples' subdirectories" OFF) -if (OS_LINUX AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND CMAKE_VERSION VERSION_GREATER "3.9.0") - # Only for Linux, x86_64. +if (OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64) AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND CMAKE_VERSION VERSION_GREATER "3.9.0") + # Only for Linux, x86_64 or aarch64. option(GLIBC_COMPATIBILITY "Enable compatibility with older glibc libraries." ON) elseif(GLIBC_COMPATIBILITY) message (${RECONFIGURE_MESSAGE_LEVEL} "Glibc compatibility cannot be enabled in current configuration") @@ -242,31 +258,50 @@ endif() include(cmake/cpu_features.cmake) -option(ARCH_NATIVE "Add -march=native compiler flag") +option(ARCH_NATIVE "Add -march=native compiler flag. 
This makes your binaries non-portable but more performant code may be generated.") if (ARCH_NATIVE) set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=native") endif () -if (COMPILER_GCC OR COMPILER_CLANG) - # to make numeric_limits<__int128> works with GCC - set (_CXX_STANDARD "gnu++2a") -else() - set (_CXX_STANDARD "c++2a") -endif() +# Asynchronous unwind tables are needed for Query Profiler. +# They are already by default on some platforms but possibly not on all platforms. +# Enable it explicitly. +set (COMPILER_FLAGS "${COMPILER_FLAGS} -fasynchronous-unwind-tables") -# cmake < 3.12 doesn't support 20. We'll set CMAKE_CXX_FLAGS for now -# set (CMAKE_CXX_STANDARD 20) -set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=${_CXX_STANDARD}") +if (${CMAKE_VERSION} VERSION_LESS "3.12.4") + # CMake < 3.12 doesn't support setting 20 as a C++ standard version. + # We will add C++ standard controlling flag in CMAKE_CXX_FLAGS manually for now. -set (CMAKE_CXX_EXTENSIONS 0) # https://cmake.org/cmake/help/latest/prop_tgt/CXX_EXTENSIONS.html#prop_tgt:CXX_EXTENSIONS -set (CMAKE_CXX_STANDARD_REQUIRED ON) + if (COMPILER_GCC OR COMPILER_CLANG) + # to make numeric_limits<__int128> works with GCC + set (_CXX_STANDARD "gnu++2a") + else () + set (_CXX_STANDARD "c++2a") + endif () + + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=${_CXX_STANDARD}") +else () + set (CMAKE_CXX_STANDARD 20) + set (CMAKE_CXX_EXTENSIONS ON) # Same as gnu++2a (ON) vs c++2a (OFF): https://cmake.org/cmake/help/latest/prop_tgt/CXX_EXTENSIONS.html + set (CMAKE_CXX_STANDARD_REQUIRED ON) +endif () + +set (CMAKE_C_STANDARD 11) +set (CMAKE_C_EXTENSIONS ON) +set (CMAKE_C_STANDARD_REQUIRED ON) if (COMPILER_GCC OR COMPILER_CLANG) # Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure. set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation") endif () +# falign-functions=32 prevents from random performance regressions with the code change. Thus, providing more stable +# benchmarks. +if (COMPILER_GCC OR COMPILER_CLANG) + set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32") +endif () + # Compiler-specific coverage flags e.g. 
-fcoverage-mapping for gcc option(WITH_COVERAGE "Profile the resulting binary/binaries" OFF) @@ -454,6 +489,7 @@ find_contrib_lib(double-conversion) # Must be before parquet include (cmake/find/ssl.cmake) include (cmake/find/ldap.cmake) # after ssl include (cmake/find/icu.cmake) +include (cmake/find/xz.cmake) include (cmake/find/zlib.cmake) include (cmake/find/zstd.cmake) include (cmake/find/ltdl.cmake) # for odbc @@ -464,10 +500,10 @@ include (cmake/find/krb5.cmake) include (cmake/find/libgsasl.cmake) include (cmake/find/cyrus-sasl.cmake) include (cmake/find/rdkafka.cmake) +include (cmake/find/libuv.cmake) # for amqpcpp and cassandra include (cmake/find/amqpcpp.cmake) include (cmake/find/capnp.cmake) include (cmake/find/llvm.cmake) -include (cmake/find/termcap.cmake) # for external static llvm include (cmake/find/h3.cmake) include (cmake/find/libxml2.cmake) include (cmake/find/brotli.cmake) @@ -486,9 +522,11 @@ include (cmake/find/fast_float.cmake) include (cmake/find/rapidjson.cmake) include (cmake/find/fastops.cmake) include (cmake/find/odbc.cmake) +include (cmake/find/nanodbc.cmake) include (cmake/find/rocksdb.cmake) include (cmake/find/libpqxx.cmake) include (cmake/find/nuraft.cmake) +include (cmake/find/yaml-cpp.cmake) if(NOT USE_INTERNAL_PARQUET_LIBRARY) @@ -501,6 +539,7 @@ include (cmake/find/msgpack.cmake) include (cmake/find/cassandra.cmake) include (cmake/find/sentry.cmake) include (cmake/find/stats.cmake) +include (cmake/find/datasketches.cmake) set (USE_INTERNAL_CITYHASH_LIBRARY ON CACHE INTERNAL "") find_contrib_lib(cityhash) @@ -554,6 +593,9 @@ include_directories(${ConfigIncludePath}) # Add as many warnings as possible for our own code. include (cmake/warnings.cmake) +# Check if needed compiler flags are supported +include (cmake/check_flags.cmake) + add_subdirectory (base) add_subdirectory (src) add_subdirectory (programs) diff --git a/README.md b/README.md index ea9f365a3c6..5677837815c 100644 --- a/README.md +++ b/README.md @@ -8,8 +8,11 @@ ClickHouse® is an open-source column-oriented database management system that a * [Tutorial](https://clickhouse.tech/docs/en/getting_started/tutorial/) shows how to set up and query small ClickHouse cluster. * [Documentation](https://clickhouse.tech/docs/en/) provides more in-depth information. * [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format. -* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-nwwakmk4-xOJ6cdy0sJC3It8j348~IA) and [Telegram](https://telegram.me/clickhouse_en) allow to chat with ClickHouse users in real-time. +* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-qfort0u8-TWqK4wIP0YSdoDE0btKa1w) and [Telegram](https://telegram.me/clickhouse_en) allow to chat with ClickHouse users in real-time. * [Blog](https://clickhouse.yandex/blog/en/) contains various ClickHouse-related articles, as well as announcements and reports about events. * [Code Browser](https://clickhouse.tech/codebrowser/html_report/ClickHouse/index.html) with syntax highlight and navigation. * [Contacts](https://clickhouse.tech/#contacts) can help to get your questions answered if there are any. * You can also [fill this form](https://clickhouse.tech/#meet) to meet Yandex ClickHouse team in person. + +## Upcoming Events +* [SF Bay Area ClickHouse Community Meetup (online)](https://www.meetup.com/San-Francisco-Bay-Area-ClickHouse-Meetup/events/278144089/) on 16 June 2021. 
diff --git a/base/CMakeLists.txt b/base/CMakeLists.txt index 46bd57eda12..023dcaaccae 100644 --- a/base/CMakeLists.txt +++ b/base/CMakeLists.txt @@ -8,6 +8,7 @@ add_subdirectory (loggers) add_subdirectory (pcg-random) add_subdirectory (widechar_width) add_subdirectory (readpassphrase) +add_subdirectory (bridge) if (USE_MYSQL) add_subdirectory (mysqlxx) diff --git a/base/bridge/CMakeLists.txt b/base/bridge/CMakeLists.txt new file mode 100644 index 00000000000..bcba43e8c2e --- /dev/null +++ b/base/bridge/CMakeLists.txt @@ -0,0 +1,13 @@ +add_library (bridge + IBridge.cpp +) + +target_include_directories (daemon PUBLIC ..) +target_link_libraries (bridge + PRIVATE + daemon + dbms + Poco::Data + Poco::Data::ODBC +) + diff --git a/base/bridge/IBridge.cpp b/base/bridge/IBridge.cpp new file mode 100644 index 00000000000..b2ec53158b1 --- /dev/null +++ b/base/bridge/IBridge.cpp @@ -0,0 +1,233 @@ +#include "IBridge.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if USE_ODBC +# include +#endif + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int ARGUMENT_OUT_OF_BOUND; +} + +namespace +{ + Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port, Poco::Logger * log) + { + Poco::Net::SocketAddress socket_address; + try + { + socket_address = Poco::Net::SocketAddress(host, port); + } + catch (const Poco::Net::DNSException & e) + { + const auto code = e.code(); + if (code == EAI_FAMILY +#if defined(EAI_ADDRFAMILY) + || code == EAI_ADDRFAMILY +#endif + ) + { + LOG_ERROR(log, "Cannot resolve listen_host ({}), error {}: {}. If it is an IPv6 address and your host has disabled IPv6, then consider to specify IPv4 address to listen in element of configuration file. Example: 0.0.0.0", host, e.code(), e.message()); + } + + throw; + } + return socket_address; + } + + Poco::Net::SocketAddress socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, Poco::Logger * log) + { + auto address = makeSocketAddress(host, port, log); +#if POCO_VERSION < 0x01080000 + socket.bind(address, /* reuseAddress = */ true); +#else + socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ false); +#endif + + socket.listen(/* backlog = */ 64); + + return address; + } +} + + +void IBridge::handleHelp(const std::string &, const std::string &) +{ + Poco::Util::HelpFormatter help_formatter(options()); + help_formatter.setCommand(commandName()); + help_formatter.setHeader("HTTP-proxy for odbc requests"); + help_formatter.setUsage("--http-port "); + help_formatter.format(std::cerr); + + stopOptionsProcessing(); +} + + +void IBridge::defineOptions(Poco::Util::OptionSet & options) +{ + options.addOption( + Poco::Util::Option("http-port", "", "port to listen").argument("http-port", true) .binding("http-port")); + + options.addOption( + Poco::Util::Option("listen-host", "", "hostname or address to listen, default 127.0.0.1").argument("listen-host").binding("listen-host")); + + options.addOption( + Poco::Util::Option("http-timeout", "", "http timeout for socket, default 1800").argument("http-timeout").binding("http-timeout")); + + options.addOption( + Poco::Util::Option("max-server-connections", "", "max connections to server, default 1024").argument("max-server-connections").binding("max-server-connections")); + + options.addOption( + Poco::Util::Option("keep-alive-timeout", "", "keepalive timeout, default 10").argument("keep-alive-timeout").binding("keep-alive-timeout")); + + options.addOption( + 
Poco::Util::Option("log-level", "", "sets log level, default info") .argument("log-level").binding("logger.level")); + + options.addOption( + Poco::Util::Option("log-path", "", "log path for all logs, default console").argument("log-path").binding("logger.log")); + + options.addOption( + Poco::Util::Option("err-log-path", "", "err log path for all logs, default no").argument("err-log-path").binding("logger.errorlog")); + + options.addOption( + Poco::Util::Option("stdout-path", "", "stdout log path, default console").argument("stdout-path").binding("logger.stdout")); + + options.addOption( + Poco::Util::Option("stderr-path", "", "stderr log path, default console").argument("stderr-path").binding("logger.stderr")); + + using Me = std::decay_t; + + options.addOption( + Poco::Util::Option("help", "", "produce this help message").binding("help").callback(Poco::Util::OptionCallback(this, &Me::handleHelp))); + + ServerApplication::defineOptions(options); // NOLINT Don't need complex BaseDaemon's .xml config +} + + +void IBridge::initialize(Application & self) +{ + BaseDaemon::closeFDs(); + is_help = config().has("help"); + + if (is_help) + return; + + config().setString("logger", bridgeName()); + + /// Redirect stdout, stderr to specified files. + /// Some libraries and sanitizers write to stderr in case of errors. + const auto stdout_path = config().getString("logger.stdout", ""); + if (!stdout_path.empty()) + { + if (!freopen(stdout_path.c_str(), "a+", stdout)) + throw Poco::OpenFileException("Cannot attach stdout to " + stdout_path); + + /// Disable buffering for stdout. + setbuf(stdout, nullptr); + } + const auto stderr_path = config().getString("logger.stderr", ""); + if (!stderr_path.empty()) + { + if (!freopen(stderr_path.c_str(), "a+", stderr)) + throw Poco::OpenFileException("Cannot attach stderr to " + stderr_path); + + /// Disable buffering for stderr. 
+ setbuf(stderr, nullptr); + } + + buildLoggers(config(), logger(), self.commandName()); + + BaseDaemon::logRevision(); + + log = &logger(); + hostname = config().getString("listen-host", "127.0.0.1"); + port = config().getUInt("http-port"); + if (port > 0xFFFF) + throw Exception("Out of range 'http-port': " + std::to_string(port), ErrorCodes::ARGUMENT_OUT_OF_BOUND); + + http_timeout = config().getUInt64("http-timeout", DEFAULT_HTTP_READ_BUFFER_TIMEOUT); + max_server_connections = config().getUInt("max-server-connections", 1024); + keep_alive_timeout = config().getUInt64("keep-alive-timeout", 10); + + initializeTerminationAndSignalProcessing(); + + ServerApplication::initialize(self); // NOLINT +} + + +void IBridge::uninitialize() +{ + BaseDaemon::uninitialize(); +} + + +int IBridge::main(const std::vector & /*args*/) +{ + if (is_help) + return Application::EXIT_OK; + + registerFormats(); + LOG_INFO(log, "Starting up {} on host: {}, port: {}", bridgeName(), hostname, port); + + Poco::Net::ServerSocket socket; + auto address = socketBindListen(socket, hostname, port, log); + socket.setReceiveTimeout(http_timeout); + socket.setSendTimeout(http_timeout); + + Poco::ThreadPool server_pool(3, max_server_connections); + + Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams; + http_params->setTimeout(http_timeout); + http_params->setKeepAliveTimeout(keep_alive_timeout); + + auto shared_context = Context::createShared(); + auto context = Context::createGlobal(shared_context.get()); + context->makeGlobalContext(); + + if (config().has("query_masking_rules")) + SensitiveDataMasker::setInstance(std::make_unique(config(), "query_masking_rules")); + + auto server = HTTPServer( + context, + getHandlerFactoryPtr(context), + server_pool, + socket, + http_params); + + SCOPE_EXIT({ + LOG_DEBUG(log, "Received termination signal."); + LOG_DEBUG(log, "Waiting for current connections to close."); + + server.stop(); + + for (size_t count : ext::range(1, 6)) + { + if (server.currentConnections() == 0) + break; + LOG_DEBUG(log, "Waiting for {} connections, try {}", server.currentConnections(), count); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + } + }); + + server.start(); + LOG_INFO(log, "Listening http://{}", address.toString()); + + waitForTerminationRequest(); + return Application::EXIT_OK; +} + +} diff --git a/base/bridge/IBridge.h b/base/bridge/IBridge.h new file mode 100644 index 00000000000..c64003d9959 --- /dev/null +++ b/base/bridge/IBridge.h @@ -0,0 +1,51 @@ +#pragma once + +#include +#include +#include + +#include +#include + + +namespace DB +{ + +/// Class represents base for clickhouse-odbc-bridge and clickhouse-library-bridge servers. +/// Listens to incoming HTTP POST and GET requests on specified port and host. +/// Has two handlers '/' for all incoming POST requests and /ping for GET request about service status. 
+class IBridge : public BaseDaemon +{ + +public: + /// Define command line arguments + void defineOptions(Poco::Util::OptionSet & options) override; + +protected: + using HandlerFactoryPtr = std::shared_ptr; + + void initialize(Application & self) override; + + void uninitialize() override; + + int main(const std::vector & args) override; + + virtual std::string bridgeName() const = 0; + + virtual HandlerFactoryPtr getHandlerFactoryPtr(ContextPtr context) const = 0; + + size_t keep_alive_timeout; + +private: + void handleHelp(const std::string &, const std::string &); + + bool is_help; + std::string hostname; + size_t port; + std::string log_level; + size_t max_server_connections; + size_t http_timeout; + + Poco::Logger * log; +}; +} diff --git a/src/Common/BorrowedObjectPool.h b/base/common/BorrowedObjectPool.h similarity index 99% rename from src/Common/BorrowedObjectPool.h rename to base/common/BorrowedObjectPool.h index d5263cf92a8..6a90a7e7122 100644 --- a/src/Common/BorrowedObjectPool.h +++ b/base/common/BorrowedObjectPool.h @@ -7,8 +7,7 @@ #include #include - -#include +#include /** Pool for limited size objects that cannot be used from different threads simultaneously. * The main use case is to have fixed size of objects that can be reused in difference threads during their lifetime diff --git a/base/common/CMakeLists.txt b/base/common/CMakeLists.txt index 7dfb9bc10c0..e5e18669ebe 100644 --- a/base/common/CMakeLists.txt +++ b/base/common/CMakeLists.txt @@ -29,7 +29,7 @@ elseif (ENABLE_READLINE) endif () if (USE_DEBUG_HELPERS) - set (INCLUDE_DEBUG_HELPERS "-include ${ClickHouse_SOURCE_DIR}/base/common/iostream_debug_helpers.h") + set (INCLUDE_DEBUG_HELPERS "-include \"${ClickHouse_SOURCE_DIR}/base/common/iostream_debug_helpers.h\"") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}") endif () @@ -45,7 +45,7 @@ if (USE_INTERNAL_CCTZ) set_source_files_properties(DateLUTImpl.cpp PROPERTIES COMPILE_DEFINITIONS USE_INTERNAL_CCTZ) endif() -target_include_directories(common PUBLIC .. ${CMAKE_CURRENT_BINARY_DIR}/..) +target_include_directories(common PUBLIC .. "${CMAKE_CURRENT_BINARY_DIR}/..") if (OS_DARWIN AND NOT MAKE_STATIC_LIBRARIES) target_link_libraries(common PUBLIC -Wl,-U,_inside_main) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 363f281584e..9e60181e802 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -25,7 +25,7 @@ #if defined(__PPC__) -#if !__clang__ +#if !defined(__clang__) #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" #endif #endif @@ -1266,7 +1266,7 @@ public: }; #if defined(__PPC__) -#if !__clang__ +#if !defined(__clang__) #pragma GCC diagnostic pop #endif #endif diff --git a/base/common/DecomposedFloat.h b/base/common/DecomposedFloat.h new file mode 100644 index 00000000000..078ba823c15 --- /dev/null +++ b/base/common/DecomposedFloat.h @@ -0,0 +1,216 @@ +#pragma once + +#include +#include +#include +#include + + +/// Allows to check the internals of IEEE-754 floating point number. 
+ +template struct FloatTraits; + +template <> +struct FloatTraits +{ + using UInt = uint32_t; + static constexpr size_t bits = 32; + static constexpr size_t exponent_bits = 8; + static constexpr size_t mantissa_bits = bits - exponent_bits - 1; +}; + +template <> +struct FloatTraits +{ + using UInt = uint64_t; + static constexpr size_t bits = 64; + static constexpr size_t exponent_bits = 11; + static constexpr size_t mantissa_bits = bits - exponent_bits - 1; +}; + + +/// x = sign * (2 ^ normalized_exponent) * (1 + mantissa * 2 ^ -mantissa_bits) +/// x = sign * (2 ^ normalized_exponent + mantissa * 2 ^ (normalized_exponent - mantissa_bits)) +template +struct DecomposedFloat +{ + using Traits = FloatTraits; + + DecomposedFloat(T x) + { + memcpy(&x_uint, &x, sizeof(x)); + } + + typename Traits::UInt x_uint; + + bool is_negative() const + { + return x_uint >> (Traits::bits - 1); + } + + /// Returns 0 for both +0. and -0. + int sign() const + { + return (exponent() == 0 && mantissa() == 0) + ? 0 + : (is_negative() + ? -1 + : 1); + } + + uint16_t exponent() const + { + return (x_uint >> (Traits::mantissa_bits)) & (((1ull << (Traits::exponent_bits + 1)) - 1) >> 1); + } + + int16_t normalized_exponent() const + { + return int16_t(exponent()) - ((1ull << (Traits::exponent_bits - 1)) - 1); + } + + uint64_t mantissa() const + { + return x_uint & ((1ull << Traits::mantissa_bits) - 1); + } + + int64_t mantissa_with_sign() const + { + return is_negative() ? -mantissa() : mantissa(); + } + + /// NOTE Probably floating point instructions can be better. + bool is_integer_in_representable_range() const + { + return x_uint == 0 + || (normalized_exponent() >= 0 /// The number is not less than one + /// The number is inside the range where every integer has exact representation in float + && normalized_exponent() <= static_cast(Traits::mantissa_bits) + /// After multiplying by 2^exp, the fractional part becomes zero, means the number is integer + && ((mantissa() & ((1ULL << (Traits::mantissa_bits - normalized_exponent())) - 1)) == 0)); + } + + + /// Compare float with integer of arbitrary width (both signed and unsigned are supported). Assuming two's complement arithmetic. + /// Infinities are compared correctly. NaNs are treat similarly to infinities, so they can be less than all numbers. + /// (note that we need total order) + template + int compare(Int rhs) + { + if (rhs == 0) + return sign(); + + /// Different signs + if (is_negative() && rhs > 0) + return -1; + if (!is_negative() && rhs < 0) + return 1; + + /// Fractional number with magnitude less than one + if (normalized_exponent() < 0) + { + if (!is_negative()) + return rhs > 0 ? -1 : 1; + else + return rhs >= 0 ? -1 : 1; + } + + /// The case of the most negative integer + if constexpr (is_signed_v) + { + if (rhs == std::numeric_limits::lowest()) + { + assert(is_negative()); + + if (normalized_exponent() < static_cast(8 * sizeof(Int) - is_signed_v)) + return 1; + if (normalized_exponent() > static_cast(8 * sizeof(Int) - is_signed_v)) + return -1; + + if (mantissa() == 0) + return 0; + else + return -1; + } + } + + /// Too large number: abs(float) > abs(rhs). Also the case with infinities and NaN. + if (normalized_exponent() >= static_cast(8 * sizeof(Int) - is_signed_v)) + return is_negative() ? -1 : 1; + + using UInt = make_unsigned_t; + UInt uint_rhs = rhs < 0 ? -rhs : rhs; + + /// Smaller octave: abs(rhs) < abs(float) + if (uint_rhs < (static_cast(1) << normalized_exponent())) + return is_negative() ? 
-1 : 1; + + /// Larger octave: abs(rhs) > abs(float) + if (normalized_exponent() + 1 < static_cast(8 * sizeof(Int) - is_signed_v) + && uint_rhs >= (static_cast(1) << (normalized_exponent() + 1))) + return is_negative() ? 1 : -1; + + /// The same octave + /// uint_rhs == 2 ^ normalized_exponent + mantissa * 2 ^ (normalized_exponent - mantissa_bits) + + bool large_and_always_integer = normalized_exponent() >= static_cast(Traits::mantissa_bits); + + typename Traits::UInt a = large_and_always_integer + ? mantissa() << (normalized_exponent() - Traits::mantissa_bits) + : mantissa() >> (Traits::mantissa_bits - normalized_exponent()); + + typename Traits::UInt b = uint_rhs - (static_cast(1) << normalized_exponent()); + + if (a < b) + return is_negative() ? 1 : -1; + if (a > b) + return is_negative() ? -1 : 1; + + /// Float has no fractional part means that the numbers are equal. + if (large_and_always_integer || (mantissa() & ((1ULL << (Traits::mantissa_bits - normalized_exponent())) - 1)) == 0) + return 0; + else + /// Float has fractional part means its abs value is larger. + return is_negative() ? -1 : 1; + } + + + template + bool equals(Int rhs) + { + return compare(rhs) == 0; + } + + template + bool notEquals(Int rhs) + { + return compare(rhs) != 0; + } + + template + bool less(Int rhs) + { + return compare(rhs) < 0; + } + + template + bool greater(Int rhs) + { + return compare(rhs) > 0; + } + + template + bool lessOrEquals(Int rhs) + { + return compare(rhs) <= 0; + } + + template + bool greaterOrEquals(Int rhs) + { + return compare(rhs) >= 0; + } +}; + + +using DecomposedFloat64 = DecomposedFloat; +using DecomposedFloat32 = DecomposedFloat; diff --git a/src/Common/MoveOrCopyIfThrow.h b/base/common/MoveOrCopyIfThrow.h similarity index 100% rename from src/Common/MoveOrCopyIfThrow.h rename to base/common/MoveOrCopyIfThrow.h diff --git a/base/common/ReplxxLineReader.cpp b/base/common/ReplxxLineReader.cpp index fcd1610e589..7893e56d751 100644 --- a/base/common/ReplxxLineReader.cpp +++ b/base/common/ReplxxLineReader.cpp @@ -91,6 +91,10 @@ ReplxxLineReader::ReplxxLineReader( /// it also binded to M-p/M-n). 
rx.bind_key(Replxx::KEY::meta('N'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::COMPLETE_NEXT, code); }); rx.bind_key(Replxx::KEY::meta('P'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::COMPLETE_PREVIOUS, code); }); + /// By default M-BACKSPACE is KILL_TO_WHITESPACE_ON_LEFT, while in readline it is backward-kill-word + rx.bind_key(Replxx::KEY::meta(Replxx::KEY::BACKSPACE), [this](char32_t code) { return rx.invoke(Replxx::ACTION::KILL_TO_BEGINING_OF_WORD, code); }); + /// By default C-w is KILL_TO_BEGINING_OF_WORD, while in readline it is unix-word-rubout + rx.bind_key(Replxx::KEY::control('W'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::KILL_TO_WHITESPACE_ON_LEFT, code); }); rx.bind_key(Replxx::KEY::meta('E'), [this](char32_t) { openEditor(); return Replxx::ACTION_RESULT::CONTINUE; }); } diff --git a/base/common/arithmeticOverflow.h b/base/common/arithmeticOverflow.h index a92fe56b9cb..175e75a62f4 100644 --- a/base/common/arithmeticOverflow.h +++ b/base/common/arithmeticOverflow.h @@ -25,6 +25,12 @@ namespace common return x - y; } + template + inline auto NO_SANITIZE_UNDEFINED negateIgnoreOverflow(T x) + { + return -x; + } + template inline bool addOverflow(T x, T y, T & res) { @@ -50,27 +56,33 @@ namespace common } template <> - inline bool addOverflow(__int128 x, __int128 y, __int128 & res) + inline bool addOverflow(Int128 x, Int128 y, Int128 & res) { - static constexpr __int128 min_int128 = minInt128(); - static constexpr __int128 max_int128 = maxInt128(); res = addIgnoreOverflow(x, y); - return (y > 0 && x > max_int128 - y) || (y < 0 && x < min_int128 - y); + return (y > 0 && x > std::numeric_limits::max() - y) || + (y < 0 && x < std::numeric_limits::min() - y); } template <> - inline bool addOverflow(wInt256 x, wInt256 y, wInt256 & res) + inline bool addOverflow(UInt128 x, UInt128 y, UInt128 & res) { res = addIgnoreOverflow(x, y); - return (y > 0 && x > std::numeric_limits::max() - y) || - (y < 0 && x < std::numeric_limits::min() - y); + return x > std::numeric_limits::max() - y; } template <> - inline bool addOverflow(wUInt256 x, wUInt256 y, wUInt256 & res) + inline bool addOverflow(Int256 x, Int256 y, Int256 & res) { res = addIgnoreOverflow(x, y); - return x > std::numeric_limits::max() - y; + return (y > 0 && x > std::numeric_limits::max() - y) || + (y < 0 && x < std::numeric_limits::min() - y); + } + + template <> + inline bool addOverflow(UInt256 x, UInt256 y, UInt256 & res) + { + res = addIgnoreOverflow(x, y); + return x > std::numeric_limits::max() - y; } template @@ -98,24 +110,30 @@ namespace common } template <> - inline bool subOverflow(__int128 x, __int128 y, __int128 & res) + inline bool subOverflow(Int128 x, Int128 y, Int128 & res) { - static constexpr __int128 min_int128 = minInt128(); - static constexpr __int128 max_int128 = maxInt128(); res = subIgnoreOverflow(x, y); - return (y < 0 && x > max_int128 + y) || (y > 0 && x < min_int128 + y); + return (y < 0 && x > std::numeric_limits::max() + y) || + (y > 0 && x < std::numeric_limits::min() + y); } template <> - inline bool subOverflow(wInt256 x, wInt256 y, wInt256 & res) + inline bool subOverflow(UInt128 x, UInt128 y, UInt128 & res) { res = subIgnoreOverflow(x, y); - return (y < 0 && x > std::numeric_limits::max() + y) || - (y > 0 && x < std::numeric_limits::min() + y); + return x < y; } template <> - inline bool subOverflow(wUInt256 x, wUInt256 y, wUInt256 & res) + inline bool subOverflow(Int256 x, Int256 y, Int256 & res) + { + res = subIgnoreOverflow(x, y); + return (y < 0 && x 
> std::numeric_limits::max() + y) || + (y > 0 && x < std::numeric_limits::min() + y); + } + + template <> + inline bool subOverflow(UInt256 x, UInt256 y, UInt256 & res) { res = subIgnoreOverflow(x, y); return x < y; @@ -145,36 +163,33 @@ namespace common return __builtin_smulll_overflow(x, y, &res); } + /// Overflow check is not implemented for big integers. + template <> - inline bool mulOverflow(__int128 x, __int128 y, __int128 & res) + inline bool mulOverflow(Int128 x, Int128 y, Int128 & res) { res = mulIgnoreOverflow(x, y); - if (!x || !y) - return false; - - unsigned __int128 a = (x > 0) ? x : -x; - unsigned __int128 b = (y > 0) ? y : -y; - return mulIgnoreOverflow(a, b) / b != a; + return false; } template <> - inline bool mulOverflow(wInt256 x, wInt256 y, wInt256 & res) + inline bool mulOverflow(Int256 x, Int256 y, Int256 & res) { res = mulIgnoreOverflow(x, y); - if (!x || !y) - return false; - - wInt256 a = (x > 0) ? x : -x; - wInt256 b = (y > 0) ? y : -y; - return mulIgnoreOverflow(a, b) / b != a; + return false; } template <> - inline bool mulOverflow(wUInt256 x, wUInt256 y, wUInt256 & res) + inline bool mulOverflow(UInt128 x, UInt128 y, UInt128 & res) { res = mulIgnoreOverflow(x, y); - if (!x || !y) - return false; - return res / y != x; + return false; + } + + template <> + inline bool mulOverflow(UInt256 x, UInt256 y, UInt256 & res) + { + res = mulIgnoreOverflow(x, y); + return false; } } diff --git a/base/common/extended_types.h b/base/common/extended_types.h index 2ae70c0f432..79209568ef5 100644 --- a/base/common/extended_types.h +++ b/base/common/extended_types.h @@ -5,16 +5,14 @@ #include #include -using Int128 = __int128; -using wInt256 = wide::integer<256, signed>; -using wUInt256 = wide::integer<256, unsigned>; +using Int128 = wide::integer<128, signed>; +using UInt128 = wide::integer<128, unsigned>; +using Int256 = wide::integer<256, signed>; +using UInt256 = wide::integer<256, unsigned>; -static_assert(sizeof(wInt256) == 32); -static_assert(sizeof(wUInt256) == 32); - -static constexpr __int128 minInt128() { return static_cast(1) << 127; } -static constexpr __int128 maxInt128() { return (static_cast(1) << 127) - 1; } +static_assert(sizeof(Int256) == 32); +static_assert(sizeof(UInt256) == 32); /// The standard library type traits, such as std::is_arithmetic, with one exception /// (std::common_type), are "set in stone". Attempting to specialize them causes undefined behavior. 
@@ -26,7 +24,7 @@ struct is_signed }; template <> struct is_signed { static constexpr bool value = true; }; -template <> struct is_signed { static constexpr bool value = true; }; +template <> struct is_signed { static constexpr bool value = true; }; template inline constexpr bool is_signed_v = is_signed::value; @@ -37,7 +35,8 @@ struct is_unsigned static constexpr bool value = std::is_unsigned_v; }; -template <> struct is_unsigned { static constexpr bool value = true; }; +template <> struct is_unsigned { static constexpr bool value = true; }; +template <> struct is_unsigned { static constexpr bool value = true; }; template inline constexpr bool is_unsigned_v = is_unsigned::value; @@ -51,8 +50,9 @@ struct is_integer }; template <> struct is_integer { static constexpr bool value = true; }; -template <> struct is_integer { static constexpr bool value = true; }; -template <> struct is_integer { static constexpr bool value = true; }; +template <> struct is_integer { static constexpr bool value = true; }; +template <> struct is_integer { static constexpr bool value = true; }; +template <> struct is_integer { static constexpr bool value = true; }; template inline constexpr bool is_integer_v = is_integer::value; @@ -64,7 +64,11 @@ struct is_arithmetic static constexpr bool value = std::is_arithmetic_v; }; -template <> struct is_arithmetic<__int128> { static constexpr bool value = true; }; +template <> struct is_arithmetic { static constexpr bool value = true; }; +template <> struct is_arithmetic { static constexpr bool value = true; }; +template <> struct is_arithmetic { static constexpr bool value = true; }; +template <> struct is_arithmetic { static constexpr bool value = true; }; + template inline constexpr bool is_arithmetic_v = is_arithmetic::value; @@ -75,9 +79,10 @@ struct make_unsigned typedef std::make_unsigned_t type; }; -template <> struct make_unsigned { using type = unsigned __int128; }; -template <> struct make_unsigned { using type = wUInt256; }; -template <> struct make_unsigned { using type = wUInt256; }; +template <> struct make_unsigned { using type = UInt128; }; +template <> struct make_unsigned { using type = UInt128; }; +template <> struct make_unsigned { using type = UInt256; }; +template <> struct make_unsigned { using type = UInt256; }; template using make_unsigned_t = typename make_unsigned::type; @@ -87,8 +92,10 @@ struct make_signed typedef std::make_signed_t type; }; -template <> struct make_signed { using type = wInt256; }; -template <> struct make_signed { using type = wInt256; }; +template <> struct make_signed { using type = Int128; }; +template <> struct make_signed { using type = Int128; }; +template <> struct make_signed { using type = Int256; }; +template <> struct make_signed { using type = Int256; }; template using make_signed_t = typename make_signed::type; @@ -98,8 +105,10 @@ struct is_big_int static constexpr bool value = false; }; -template <> struct is_big_int { static constexpr bool value = true; }; -template <> struct is_big_int { static constexpr bool value = true; }; +template <> struct is_big_int { static constexpr bool value = true; }; +template <> struct is_big_int { static constexpr bool value = true; }; +template <> struct is_big_int { static constexpr bool value = true; }; +template <> struct is_big_int { static constexpr bool value = true; }; template inline constexpr bool is_big_int_v = is_big_int::value; diff --git a/base/common/getThreadId.cpp b/base/common/getThreadId.cpp index 700c51f21fc..054e9be9074 100644 --- 
a/base/common/getThreadId.cpp +++ b/base/common/getThreadId.cpp @@ -25,6 +25,10 @@ uint64_t getThreadId() current_tid = syscall(SYS_gettid); /// This call is always successful. - man gettid #elif defined(OS_FREEBSD) current_tid = pthread_getthreadid_np(); +#elif defined(OS_SUNOS) + // On Solaris-derived systems, this returns the ID of the LWP, analogous + // to a thread. + current_tid = static_cast(pthread_self()); #else if (0 != pthread_threadid_np(nullptr, ¤t_tid)) throw std::logic_error("pthread_threadid_np returned error"); diff --git a/base/common/itoa.h b/base/common/itoa.h index a02e7b68c05..4c86239de36 100644 --- a/base/common/itoa.h +++ b/base/common/itoa.h @@ -30,9 +30,8 @@ #include #include #include +#include -using int128_t = __int128; -using uint128_t = unsigned __int128; namespace impl { @@ -106,7 +105,7 @@ using UnsignedOfSize = typename SelectType uint16_t, uint32_t, uint64_t, - uint128_t + __uint128_t >::Result; /// Holds the result of dividing an unsigned N-byte variable by 10^N resulting in @@ -313,7 +312,8 @@ namespace convert } } -static inline int digits10(uint128_t x) +template +static inline int digits10(T x) { if (x < 10ULL) return 1; @@ -346,8 +346,11 @@ static inline int digits10(uint128_t x) return 12 + digits10(x / 1000000000000ULL); } -static inline char * writeUIntText(uint128_t x, char * p) +template +static inline char * writeUIntText(T x, char * p) { + static_assert(is_unsigned_v); + int len = digits10(x); auto pp = p + len; while (x >= 100) @@ -370,14 +373,28 @@ static inline char * writeLeadingMinus(char * pos) return pos + 1; } -static inline char * writeSIntText(int128_t x, char * pos) +template +static inline char * writeSIntText(T x, char * pos) { - static constexpr int128_t min_int128 = uint128_t(1) << 127; + static_assert(std::is_same_v || std::is_same_v); - if (unlikely(x == min_int128)) + using UnsignedT = make_unsigned_t; + static constexpr T min_int = UnsignedT(1) << (sizeof(T) * 8 - 1); + + if (unlikely(x == min_int)) { - memcpy(pos, "-170141183460469231731687303715884105728", 40); - return pos + 40; + if constexpr (std::is_same_v) + { + const char * res = "-170141183460469231731687303715884105728"; + memcpy(pos, res, strlen(res)); + return pos + strlen(res); + } + else if constexpr (std::is_same_v) + { + const char * res = "-57896044618658097711785492504343953926634992332820282019728792003956564819968"; + memcpy(pos, res, strlen(res)); + return pos + strlen(res); + } } if (x < 0) @@ -385,7 +402,7 @@ static inline char * writeSIntText(int128_t x, char * pos) x = -x; pos = writeLeadingMinus(pos); } - return writeUIntText(static_cast(x), pos); + return writeUIntText(UnsignedT(x), pos); } } @@ -403,13 +420,25 @@ inline char * itoa(char8_t i, char * p) } template <> -inline char * itoa(uint128_t i, char * p) +inline char * itoa(UInt128 i, char * p) { return impl::writeUIntText(i, p); } template <> -inline char * itoa(int128_t i, char * p) +inline char * itoa(Int128 i, char * p) +{ + return impl::writeSIntText(i, p); +} + +template <> +inline char * itoa(UInt256 i, char * p) +{ + return impl::writeUIntText(i, p); +} + +template <> +inline char * itoa(Int256 i, char * p) { return impl::writeSIntText(i, p); } diff --git a/base/common/strong_typedef.h b/base/common/strong_typedef.h index 77b83bfa6e5..a1e2b253aa7 100644 --- a/base/common/strong_typedef.h +++ b/base/common/strong_typedef.h @@ -4,7 +4,8 @@ #include #include -template + +template struct StrongTypedef { private: @@ -38,14 +39,16 @@ public: bool operator==(const Self & rhs) const { return t 
== rhs.t; } bool operator<(const Self & rhs) const { return t < rhs.t; } + bool operator>(const Self & rhs) const { return t > rhs.t; } T & toUnderType() { return t; } const T & toUnderType() const { return t; } }; + namespace std { - template + template struct hash> { size_t operator()(const StrongTypedef & x) const diff --git a/base/common/throwError.h b/base/common/throwError.h index b495a0fbc7a..dd352913e78 100644 --- a/base/common/throwError.h +++ b/base/common/throwError.h @@ -1,13 +1,15 @@ #pragma once + #include + /// Throw DB::Exception-like exception before its definition. /// DB::Exception derived from Poco::Exception derived from std::exception. -/// DB::Exception generally cought as Poco::Exception. std::exception generally has other catch blocks and could lead to other outcomes. +/// DB::Exception generally caught as Poco::Exception. std::exception generally has other catch blocks and could lead to other outcomes. /// DB::Exception is not defined yet. It'd better to throw Poco::Exception but we do not want to include any big header here, even . /// So we throw some std::exception instead in the hope its catch block is the same as DB::Exception one. template -inline void throwError(const T & err) +[[noreturn]] inline void throwError(const T & err) { throw std::runtime_error(err); } diff --git a/base/common/time.h b/base/common/time.h index 1bf588b7cb3..d0b8e94a9a5 100644 --- a/base/common/time.h +++ b/base/common/time.h @@ -2,7 +2,7 @@ #include -#if defined (OS_DARWIN) +#if defined (OS_DARWIN) || defined (OS_SUNOS) # define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC #elif defined (OS_FREEBSD) # define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC_FAST diff --git a/base/common/types.h b/base/common/types.h index bd5c28fe73b..e178653f7c6 100644 --- a/base/common/types.h +++ b/base/common/types.h @@ -13,7 +13,12 @@ using char8_t = unsigned char; #endif /// This is needed for more strict aliasing. https://godbolt.org/z/xpJBSb https://stackoverflow.com/a/57453713 +#if !defined(PVS_STUDIO) /// But PVS-Studio does not treat it correctly. 
using UInt8 = char8_t; +#else +using UInt8 = uint8_t; +#endif + using UInt16 = uint16_t; using UInt32 = uint32_t; using UInt64 = uint64_t; diff --git a/base/common/wide_integer.h b/base/common/wide_integer.h index c9d1eaa32aa..419b4e4558c 100644 --- a/base/common/wide_integer.h +++ b/base/common/wide_integer.h @@ -58,9 +58,11 @@ public: using signed_base_type = int64_t; // ctors - constexpr integer() noexcept; + constexpr integer() noexcept = default; + template constexpr integer(T rhs) noexcept; + template constexpr integer(std::initializer_list il) noexcept; @@ -108,9 +110,9 @@ public: constexpr explicit operator bool() const noexcept; template - using __integral_not_wide_integer_class = typename std::enable_if::value, T>::type; + using _integral_not_wide_integer_class = typename std::enable_if::value, T>::type; - template > + template > constexpr operator T() const noexcept; constexpr operator long double() const noexcept; @@ -119,25 +121,27 @@ public: struct _impl; + base_type items[_impl::item_count]; + private: template friend class integer; friend class std::numeric_limits>; friend class std::numeric_limits>; - - base_type items[_impl::item_count]; }; template static constexpr bool ArithmeticConcept() noexcept; + template -using __only_arithmetic = typename std::enable_if() && ArithmeticConcept()>::type; +using _only_arithmetic = typename std::enable_if() && ArithmeticConcept()>::type; template static constexpr bool IntegralConcept() noexcept; + template -using __only_integer = typename std::enable_if() && IntegralConcept()>::type; +using _only_integer = typename std::enable_if() && IntegralConcept()>::type; // Unary operators template @@ -153,54 +157,55 @@ constexpr integer operator+(const integer & lhs) noe template std::common_type_t, integer> constexpr operator*(const integer & lhs, const integer & rhs); -template > +template > std::common_type_t constexpr operator*(const Arithmetic & rhs, const Arithmetic2 & lhs); template std::common_type_t, integer> constexpr operator/(const integer & lhs, const integer & rhs); -template > +template > std::common_type_t constexpr operator/(const Arithmetic & rhs, const Arithmetic2 & lhs); template std::common_type_t, integer> constexpr operator+(const integer & lhs, const integer & rhs); -template > +template > std::common_type_t constexpr operator+(const Arithmetic & rhs, const Arithmetic2 & lhs); template std::common_type_t, integer> constexpr operator-(const integer & lhs, const integer & rhs); -template > +template > std::common_type_t constexpr operator-(const Arithmetic & rhs, const Arithmetic2 & lhs); template std::common_type_t, integer> constexpr operator%(const integer & lhs, const integer & rhs); -template > +template > std::common_type_t constexpr operator%(const Integral & rhs, const Integral2 & lhs); template std::common_type_t, integer> constexpr operator&(const integer & lhs, const integer & rhs); -template > +template > std::common_type_t constexpr operator&(const Integral & rhs, const Integral2 & lhs); template std::common_type_t, integer> constexpr operator|(const integer & lhs, const integer & rhs); -template > +template > std::common_type_t constexpr operator|(const Integral & rhs, const Integral2 & lhs); template std::common_type_t, integer> constexpr operator^(const integer & lhs, const integer & rhs); -template > +template > std::common_type_t constexpr operator^(const Integral & rhs, const Integral2 & lhs); // TODO: Integral template constexpr integer operator<<(const integer & lhs, int n) noexcept; + template 
constexpr integer operator>>(const integer & lhs, int n) noexcept; @@ -217,32 +222,32 @@ constexpr integer operator>>(const integer & lhs, In template constexpr bool operator<(const integer & lhs, const integer & rhs); -template > +template > constexpr bool operator<(const Arithmetic & rhs, const Arithmetic2 & lhs); template constexpr bool operator>(const integer & lhs, const integer & rhs); -template > +template > constexpr bool operator>(const Arithmetic & rhs, const Arithmetic2 & lhs); template constexpr bool operator<=(const integer & lhs, const integer & rhs); -template > +template > constexpr bool operator<=(const Arithmetic & rhs, const Arithmetic2 & lhs); template constexpr bool operator>=(const integer & lhs, const integer & rhs); -template > +template > constexpr bool operator>=(const Arithmetic & rhs, const Arithmetic2 & lhs); template constexpr bool operator==(const integer & lhs, const integer & rhs); -template > +template > constexpr bool operator==(const Arithmetic & rhs, const Arithmetic2 & lhs); template constexpr bool operator!=(const integer & lhs, const integer & rhs); -template > +template > constexpr bool operator!=(const Arithmetic & rhs, const Arithmetic2 & lhs); } diff --git a/base/common/wide_integer_impl.h b/base/common/wide_integer_impl.h index 5b981326e25..725caec6a3e 100644 --- a/base/common/wide_integer_impl.h +++ b/base/common/wide_integer_impl.h @@ -5,6 +5,7 @@ /// (See at http://www.boost.org/LICENSE_1_0.txt) #include "throwError.h" + #include #include #include @@ -81,7 +82,7 @@ public: res.items[T::_impl::big(0)] = std::numeric_limits::signed_base_type>::min(); return res; } - return 0; + return wide::integer(0); } static constexpr wide::integer max() noexcept @@ -176,7 +177,7 @@ struct integer::_impl constexpr static bool is_negative(const integer & n) noexcept { if constexpr (std::is_same_v) - return static_cast(n.items[big(0)]) < 0; + return static_cast(n.items[integer::_impl::big(0)]) < 0; else return false; } @@ -193,40 +194,36 @@ struct integer::_impl template constexpr static integer make_positive(const integer & n) noexcept { - return is_negative(n) ? operator_unary_minus(n) : n; + return is_negative(n) ? integer(operator_unary_minus(n)) : n; } template __attribute__((no_sanitize("undefined"))) constexpr static auto to_Integral(T f) noexcept { - if constexpr (std::is_same_v) - return f; - else if constexpr (std::is_signed_v) + if constexpr (std::is_signed_v) return static_cast(f); else return static_cast(f); } template - constexpr static void wide_integer_from_bultin(integer & self, Integral rhs) noexcept + constexpr static void wide_integer_from_builtin(integer & self, Integral rhs) noexcept { - self.items[0] = _impl::to_Integral(rhs); - if constexpr (std::is_same_v) - self.items[1] = rhs >> base_bits; + static_assert(sizeof(Integral) <= sizeof(base_type)); - constexpr const unsigned start = (sizeof(Integral) == 16) ? 2 : 1; + self.items[0] = _impl::to_Integral(rhs); if constexpr (std::is_signed_v) { if (rhs < 0) { - for (unsigned i = start; i < item_count; ++i) + for (size_t i = 1; i < item_count; ++i) self.items[i] = -1; return; } } - for (unsigned i = start; i < item_count; ++i) + for (size_t i = 1; i < item_count; ++i) self.items[i] = 0; } @@ -239,7 +236,8 @@ struct integer::_impl * a_(n - 1) = a_n * max_int + b2, a_n <= max_int <- base case. 
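 *
 * (An editorial worked example of the recurrence above, using a toy base of
 * max_int = 10 in place of 2^64 - 1: t = 347 gives a1 = 34, b1 = 7; then
 * a1 = 34 > 10 gives a2 = 3, b2 = 4; a2 <= 10 is the base case. Unwinding,
 * self = (3 * 10 + 4) * 10 + 7 = 347, so the accumulated limbs rebuild t.)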
*/ template - constexpr static void set_multiplier(integer & self, T t) noexcept { + constexpr static void set_multiplier(integer & self, T t) noexcept + { constexpr uint64_t max_int = std::numeric_limits::max(); /// Implementation specific behaviour on overflow (if we don't check here, stack overflow will triggered in bigint_cast). @@ -260,7 +258,8 @@ struct integer::_impl self += static_cast(t - alpha * static_cast(max_int)); // += b_i } - constexpr static void wide_integer_from_bultin(integer& self, double rhs) noexcept { + constexpr static void wide_integer_from_builtin(integer& self, double rhs) noexcept + { constexpr int64_t max_int = std::numeric_limits::max(); constexpr int64_t min_int = std::numeric_limits::min(); @@ -271,9 +270,13 @@ struct integer::_impl /// As to_Integral does a static_cast to int64_t, it may result in UB. /// The necessary check here is that long double has enough significant (mantissa) bits to store the /// int64_t max value precisely. + + //TODO Be compatible with Apple aarch64 +#if not (defined(__APPLE__) && defined(__aarch64__)) static_assert(LDBL_MANT_DIG >= 64, "On your system long double has less than 64 precision bits," "which may result in UB when initializing double from int64_t"); +#endif if ((rhs > 0 && rhs < static_cast(max_int)) || (rhs < 0 && rhs > static_cast(min_int))) { @@ -379,13 +382,13 @@ struct integer::_impl if (bit_shift) lhs.items[big(items_shift)] |= std::numeric_limits::max() << (base_bits - bit_shift); - for (unsigned i = item_count - items_shift; i < items_shift; ++i) - lhs.items[little(i)] = std::numeric_limits::max(); + for (unsigned i = 0; i < items_shift; ++i) + lhs.items[big(i)] = std::numeric_limits::max(); } else { - for (unsigned i = item_count - items_shift; i < items_shift; ++i) - lhs.items[little(i)] = 0; + for (unsigned i = 0; i < items_shift; ++i) + lhs.items[big(i)] = 0; } return lhs; @@ -393,23 +396,23 @@ struct integer::_impl private: template - constexpr static base_type get_item(const T & x, unsigned number) + constexpr static base_type get_item(const T & x, unsigned idx) { if constexpr (IsWideInteger::value) { - if (number < T::_impl::item_count) - return x.items[number]; + if (idx < T::_impl::item_count) + return x.items[idx]; return 0; } else { if constexpr (sizeof(T) <= sizeof(base_type)) { - if (!number) + if (0 == idx) return x; } - else if (number * sizeof(base_type) < sizeof(T)) - return x >> (number * base_bits); // & std::numeric_limits::max() + else if (idx * sizeof(base_type) < sizeof(T)) + return x >> (idx * base_bits); // & std::numeric_limits::max() return 0; } } @@ -435,7 +438,7 @@ private: for (unsigned i = 1; i < item_count; ++i) { - if (underflows[i-1]) + if (underflows[i - 1]) { base_type & res_item = res.items[little(i)]; if (res_item == 0) @@ -468,7 +471,7 @@ private: for (unsigned i = 1; i < item_count; ++i) { - if (overflows[i-1]) + if (overflows[i - 1]) { base_type & res_item = res.items[little(i)]; ++res_item; @@ -528,6 +531,17 @@ private: res.items[little(2)] = r12 >> 64; return res; } + else if constexpr (Bits == 128 && sizeof(base_type) == 8) + { + using CompilerUInt128 = unsigned __int128; + CompilerUInt128 a = (CompilerUInt128(lhs.items[1]) << 64) + lhs.items[0]; + CompilerUInt128 b = (CompilerUInt128(rhs.items[1]) << 64) + rhs.items[0]; + CompilerUInt128 c = a * b; + integer res; + res.items[0] = c; + res.items[1] = c >> 64; + return res; + } else { integer res{}; @@ -653,7 +667,7 @@ public: } template - constexpr static bool operator_more(const integer & lhs, const T & rhs) 
noexcept + constexpr static bool operator_greater(const integer & lhs, const T & rhs) noexcept { if constexpr (should_keep_size()) { @@ -673,7 +687,7 @@ public: else { static_assert(IsWideInteger::value); - return std::common_type_t, T>::_impl::operator_more(T(lhs), rhs); + return std::common_type_t, T>::_impl::operator_greater(T(lhs), rhs); } } @@ -760,7 +774,6 @@ public: } } -private: template constexpr static bool is_zero(const T & x) { @@ -777,46 +790,65 @@ private: } /// returns quotient as result and remainder in numerator. - template - constexpr static T divide(T & numerator, T && denominator) + template + constexpr static integer divide(integer & numerator, integer denominator) { - if (is_zero(denominator)) - throwError("divide by zero"); + static_assert(std::is_unsigned_v); - T & n = numerator; - T & d = denominator; - T x = 1; - T quotient = 0; - - while (!operator_more(d, n) && operator_eq(operator_amp(shift_right(d, base_bits * item_count - 1), 1), 0)) + if constexpr (Bits == 128 && sizeof(base_type) == 8) { - x = shift_left(x, 1); - d = shift_left(d, 1); + using CompilerUInt128 = unsigned __int128; + + CompilerUInt128 a = (CompilerUInt128(numerator.items[1]) << 64) + numerator.items[0]; + CompilerUInt128 b = (CompilerUInt128(denominator.items[1]) << 64) + denominator.items[0]; + CompilerUInt128 c = a / b; + + integer res; + res.items[0] = c; + res.items[1] = c >> 64; + + CompilerUInt128 remainder = a - b * c; + numerator.items[0] = remainder; + numerator.items[1] = remainder >> 64; + + return res; } - while (!operator_eq(x, 0)) + if (is_zero(denominator)) + throwError("Division by zero"); + + integer x = 1; + integer quotient = 0; + + while (!operator_greater(denominator, numerator) && is_zero(operator_amp(shift_right(denominator, Bits2 - 1), 1))) { - if (!operator_more(d, n)) + x = shift_left(x, 1); + denominator = shift_left(denominator, 1); + } + + while (!is_zero(x)) + { + if (!operator_greater(denominator, numerator)) { - n = operator_minus(n, d); + numerator = operator_minus(numerator, denominator); quotient = operator_pipe(quotient, x); } x = shift_right(x, 1); - d = shift_right(d, 1); + denominator = shift_right(denominator, 1); } return quotient; } -public: template constexpr static auto operator_slash(const integer & lhs, const T & rhs) { if constexpr (should_keep_size()) { - integer numerator = make_positive(lhs); - integer quotient = divide(numerator, make_positive(integer(rhs))); + integer numerator = make_positive(lhs); + integer denominator = make_positive(integer(rhs)); + integer quotient = integer::_impl::divide(numerator, std::move(denominator)); if (std::is_same_v && is_negative(rhs) != is_negative(lhs)) quotient = operator_unary_minus(quotient); @@ -834,8 +866,9 @@ public: { if constexpr (should_keep_size()) { - integer remainder = make_positive(lhs); - divide(remainder, make_positive(integer(rhs))); + integer remainder = make_positive(lhs); + integer denominator = make_positive(integer(rhs)); + integer::_impl::divide(remainder, std::move(denominator)); if (std::is_same_v && is_negative(lhs)) remainder = operator_unary_minus(remainder); @@ -901,7 +934,7 @@ public: ++c; } else - throwError("invalid char from"); + throwError("Invalid char from"); } } else @@ -909,7 +942,7 @@ public: while (*c) { if (*c < '0' || *c > '9') - throwError("invalid char from"); + throwError("Invalid char from"); res = multiply(res, 10U); res = plus(res, *c - '0'); @@ -926,11 +959,6 @@ public: // Members -template -constexpr integer::integer() noexcept - : items{} -{} - template 
template constexpr integer::integer(T rhs) noexcept @@ -939,7 +967,7 @@ constexpr integer::integer(T rhs) noexcept if constexpr (IsWideInteger::value) _impl::wide_integer_from_wide_integer(*this, rhs); else - _impl::wide_integer_from_bultin(*this, rhs); + _impl::wide_integer_from_builtin(*this, rhs); } template @@ -952,10 +980,19 @@ constexpr integer::integer(std::initializer_list il) noexcept if constexpr (IsWideInteger::value) _impl::wide_integer_from_wide_integer(*this, *il.begin()); else - _impl::wide_integer_from_bultin(*this, *il.begin()); + _impl::wide_integer_from_builtin(*this, *il.begin()); + } + else if (il.size() == 0) + { + _impl::wide_integer_from_builtin(*this, 0); } else - _impl::wide_integer_from_bultin(*this, 0); + { + auto it = il.begin(); + for (size_t i = 0; i < _impl::item_count; ++i) + if (it < il.end()) + items[i] = *it; + } } template @@ -970,7 +1007,7 @@ template template constexpr integer & integer::operator=(T rhs) noexcept { - _impl::wide_integer_from_bultin(*this, rhs); + _impl::wide_integer_from_builtin(*this, rhs); return *this; } @@ -1053,7 +1090,7 @@ constexpr integer & integer::operator>>=(int n) noex { if (static_cast(n) >= Bits) { - if (is_negative(*this)) + if (_impl::is_negative(*this)) *this = -1; else *this = 0; @@ -1103,16 +1140,17 @@ template template constexpr integer::operator T() const noexcept { - if constexpr (std::is_same_v) - { - static_assert(Bits >= 128); - return (__int128(items[1]) << 64) | items[0]; - } - else - { - static_assert(std::numeric_limits::is_integer); - return items[0]; - } + static_assert(std::numeric_limits::is_integer); + + /// NOTE: memcpy will suffice, but unfortunately, this function is constexpr. + + using UnsignedT = std::make_unsigned_t; + + UnsignedT res{}; + for (unsigned i = 0; i < _impl::item_count && i < (sizeof(T) + sizeof(base_type) - 1) / sizeof(base_type); ++i) + res += UnsignedT(items[i]) << (sizeof(base_type) * 8 * i); + + return res; } template @@ -1276,7 +1314,7 @@ template constexpr integer operator<<(const integer & lhs, int n) noexcept { if (static_cast(n) >= Bits) - return 0; + return integer(0); if (n <= 0) return lhs; return integer::_impl::shift_left(lhs, n); @@ -1285,7 +1323,7 @@ template constexpr integer operator>>(const integer & lhs, int n) noexcept { if (static_cast(n) >= Bits) - return 0; + return integer(0); if (n <= 0) return lhs; return integer::_impl::shift_right(lhs, n); @@ -1305,7 +1343,7 @@ constexpr bool operator<(const Arithmetic & lhs, const Arithmetic2 & rhs) template constexpr bool operator>(const integer & lhs, const integer & rhs) { - return std::common_type_t, integer>::_impl::operator_more(lhs, rhs); + return std::common_type_t, integer>::_impl::operator_greater(lhs, rhs); } template constexpr bool operator>(const Arithmetic & lhs, const Arithmetic2 & rhs) @@ -1328,7 +1366,7 @@ constexpr bool operator<=(const Arithmetic & lhs, const Arithmetic2 & rhs) template constexpr bool operator>=(const integer & lhs, const integer & rhs) { - return std::common_type_t, integer>::_impl::operator_more(lhs, rhs) + return std::common_type_t, integer>::_impl::operator_greater(lhs, rhs) || std::common_type_t, integer>::_impl::operator_eq(lhs, rhs); } template diff --git a/base/common/wide_integer_to_string.h b/base/common/wide_integer_to_string.h index 9908ef4be7a..8b794fe9bcb 100644 --- a/base/common/wide_integer_to_string.h +++ b/base/common/wide_integer_to_string.h @@ -1,9 +1,12 @@ #pragma once #include +#include +#include #include "wide_integer.h" + namespace wide { @@ -33,3 +36,34 @@ 
inline std::string to_string(const integer & n) } } + + +template +std::ostream & operator<<(std::ostream & out, const wide::integer & value) +{ + return out << to_string(value); +} + + +/// See https://fmt.dev/latest/api.html#formatting-user-defined-types +template +struct fmt::formatter> +{ + constexpr auto parse(format_parse_context & ctx) + { + auto it = ctx.begin(); + auto end = ctx.end(); + + /// Only support {}. + if (it != end && *it != '}') + throw format_error("invalid format"); + + return it; + } + + template + auto format(const wide::integer & value, FormatContext & ctx) + { + return format_to(ctx.out(), "{}", to_string(value)); + } +}; diff --git a/base/common/ya.make.in b/base/common/ya.make.in index b5c2bbc1717..3deb36a2c71 100644 --- a/base/common/ya.make.in +++ b/base/common/ya.make.in @@ -35,7 +35,7 @@ PEERDIR( CFLAGS(-g0) SRCS( - + ) END() diff --git a/base/daemon/BaseDaemon.cpp b/base/daemon/BaseDaemon.cpp index 83384038b7c..6aa65942445 100644 --- a/base/daemon/BaseDaemon.cpp +++ b/base/daemon/BaseDaemon.cpp @@ -26,8 +26,6 @@ #include #include #include -#include -#include #include #include #include @@ -59,6 +57,7 @@ #include #include #include +#include #if !defined(ARCADIA_BUILD) # include @@ -70,6 +69,7 @@ #endif #include +namespace fs = std::filesystem; DB::PipeFDs signal_pipe; @@ -437,11 +437,11 @@ static void sanitizerDeathCallback() static std::string createDirectory(const std::string & file) { - auto path = Poco::Path(file).makeParent(); - if (path.toString().empty()) + fs::path path = fs::path(file).parent_path(); + if (path.empty()) return ""; - Poco::File(path).createDirectories(); - return path.toString(); + fs::create_directories(path); + return path; }; @@ -449,7 +449,7 @@ static bool tryCreateDirectories(Poco::Logger * logger, const std::string & path { try { - Poco::File(path).createDirectories(); + fs::create_directories(path); return true; } catch (...) @@ -468,9 +468,9 @@ void BaseDaemon::reloadConfiguration() * instead of using files specified in config.xml. * (It's convenient to log in console when you start server without any command line parameters.) */ - config_path = config().getString("config-file", "config.xml"); + config_path = config().getString("config-file", getDefaultConfigFileName()); DB::ConfigProcessor config_processor(config_path, false, true); - config_processor.setConfigPath(Poco::Path(config_path).makeParent().toString()); + config_processor.setConfigPath(fs::path(config_path).parent_path()); loaded_config = config_processor.loadConfig(/* allow_zk_includes = */ true); if (last_configuration != nullptr) @@ -516,21 +516,28 @@ std::string BaseDaemon::getDefaultCorePath() const return "/opt/cores/"; } +std::string BaseDaemon::getDefaultConfigFileName() const +{ + return "config.xml"; +} + void BaseDaemon::closeFDs() { #if defined(OS_FREEBSD) || defined(OS_DARWIN) - Poco::File proc_path{"/dev/fd"}; + fs::path proc_path{"/dev/fd"}; #else - Poco::File proc_path{"/proc/self/fd"}; + fs::path proc_path{"/proc/self/fd"}; #endif - if (proc_path.isDirectory()) /// Hooray, proc exists + if (fs::is_directory(proc_path)) /// Hooray, proc exists { - std::vector fds; - /// in /proc/self/fd directory filenames are numeric file descriptors - proc_path.list(fds); - for (const auto & fd_str : fds) + /// in /proc/self/fd directory filenames are numeric file descriptors. + /// Iterate directory separately from closing fds to avoid closing iterated directory fd. 
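The two comment lines just added describe a real pitfall: closing descriptors while iterating `/proc/self/fd` can close the descriptor that backs the directory stream itself. A minimal standalone sketch of the resulting collect-then-close pattern (an editorial illustration, not the patched function; `std::stoi` stands in for ClickHouse's `DB::parse`):

```cpp
#include <filesystem>
#include <string>
#include <vector>
#include <unistd.h>

namespace fs = std::filesystem;

void closeInheritedFDs()
{
    /// Pass 1: only read the directory; each filename is a descriptor number.
    std::vector<int> fds;
    for (const auto & entry : fs::directory_iterator("/proc/self/fd"))
        fds.push_back(std::stoi(entry.path().filename().string()));

    /// Pass 2: close. Doing this inside the loop above could close the
    /// descriptor used by the directory_iterator and break the iteration.
    for (int fd : fds)
        if (fd > 2)  /// keep stdin, stdout, stderr
            ::close(fd);
}
```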
+ std::vector fds; + for (const auto & path : fs::directory_iterator(proc_path)) + fds.push_back(DB::parse(path.path().filename())); + + for (const auto & fd : fds) { - int fd = DB::parse(fd_str); if (fd > 2 && fd != signal_pipe.fds_rw[0] && fd != signal_pipe.fds_rw[1]) ::close(fd); } @@ -592,7 +599,7 @@ void BaseDaemon::initialize(Application & self) { /** When creating pid file and looking for config, will search for paths relative to the working path of the program when started. */ - std::string path = Poco::Path(config().getString("application.path")).setFileName("").toString(); + std::string path = fs::path(config().getString("application.path")).replace_filename(""); if (0 != chdir(path.c_str())) throw Poco::Exception("Cannot change directory to " + path); } @@ -640,7 +647,7 @@ void BaseDaemon::initialize(Application & self) std::string log_path = config().getString("logger.log", ""); if (!log_path.empty()) - log_path = Poco::Path(log_path).setFileName("").toString(); + log_path = fs::path(log_path).replace_filename(""); /** Redirect stdout, stderr to separate files in the log directory (or in the specified file). * Some libraries write to stderr in case of errors in debug mode, @@ -703,8 +710,7 @@ void BaseDaemon::initialize(Application & self) tryCreateDirectories(&logger(), core_path); - Poco::File cores = core_path; - if (!(cores.exists() && cores.isDirectory())) + if (!(fs::exists(core_path) && fs::is_directory(core_path))) { core_path = !log_path.empty() ? log_path : "/opt/"; tryCreateDirectories(&logger(), core_path); diff --git a/base/daemon/BaseDaemon.h b/base/daemon/BaseDaemon.h index 8b9d765cf2e..3d47d85a9f5 100644 --- a/base/daemon/BaseDaemon.h +++ b/base/daemon/BaseDaemon.h @@ -149,6 +149,8 @@ protected: virtual std::string getDefaultCorePath() const; + virtual std::string getDefaultConfigFileName() const; + std::optional pid_file; std::atomic_bool is_cancelled{false}; diff --git a/base/daemon/SentryWriter.cpp b/base/daemon/SentryWriter.cpp index 29430b65983..3571c64edd6 100644 --- a/base/daemon/SentryWriter.cpp +++ b/base/daemon/SentryWriter.cpp @@ -1,6 +1,5 @@ #include -#include #include #include @@ -9,6 +8,7 @@ #include #include +#include #include #include #include @@ -24,6 +24,7 @@ # include # include +namespace fs = std::filesystem; namespace { @@ -52,8 +53,7 @@ void setExtras() sentry_set_extra("physical_cpu_cores", sentry_value_new_int32(getNumberOfPhysicalCPUCores())); if (!server_data_path.empty()) - sentry_set_extra("disk_free_space", sentry_value_new_string(formatReadableSizeWithBinarySuffix( - Poco::File(server_data_path).freeSpace()).c_str())); + sentry_set_extra("disk_free_space", sentry_value_new_string(formatReadableSizeWithBinarySuffix(fs::space(server_data_path).free).c_str())); } void sentry_logger(sentry_level_e level, const char * message, va_list args, void *) @@ -101,7 +101,7 @@ void SentryWriter::initialize(Poco::Util::LayeredConfiguration & config) auto * logger = &Poco::Logger::get("SentryWriter"); if (config.getBool("send_crash_reports.enabled", false)) { - if (debug || (strlen(VERSION_OFFICIAL) > 0)) + if (debug || (strlen(VERSION_OFFICIAL) > 0)) //-V560 { enabled = true; } @@ -109,12 +109,12 @@ void SentryWriter::initialize(Poco::Util::LayeredConfiguration & config) if (enabled) { server_data_path = config.getString("path", ""); - const std::filesystem::path & default_tmp_path = std::filesystem::path(config.getString("tmp_path", Poco::Path::temp())) / "sentry"; + const std::filesystem::path & default_tmp_path = 
fs::path(config.getString("tmp_path", fs::temp_directory_path())) / "sentry"; const std::string & endpoint = config.getString("send_crash_reports.endpoint"); const std::string & temp_folder_path = config.getString("send_crash_reports.tmp_path", default_tmp_path); - Poco::File(temp_folder_path).createDirectories(); + fs::create_directories(temp_folder_path); sentry_options_t * options = sentry_options_new(); /// will be freed by sentry_init or sentry_shutdown sentry_options_set_release(options, VERSION_STRING_SHORT); diff --git a/base/ext/scope_guard_safe.h b/base/ext/scope_guard_safe.h new file mode 100644 index 00000000000..55140213572 --- /dev/null +++ b/base/ext/scope_guard_safe.h @@ -0,0 +1,68 @@ +#pragma once + +#include +#include +#include + +/// Same as SCOPE_EXIT() but blocks the MEMORY_LIMIT_EXCEEDED errors. +/// +/// A typical example of SCOPE_EXIT_MEMORY() usage is when the code under it may do +/// some tiny allocations that may fail under high memory pressure and/or a low +/// max_memory_usage (and related limits). +/// +/// NOTE: it should be used with caution. +#define SCOPE_EXIT_MEMORY(...) SCOPE_EXIT( \ + MemoryTracker::LockExceptionInThread \ + lock_memory_tracker(VariableContext::Global); \ + __VA_ARGS__; \ +) + +/// Same as SCOPE_EXIT() but try/catch/tryLogCurrentException any exceptions. +/// +/// SCOPE_EXIT_SAFE() should be used when an exception thrown by the code +/// under SCOPE_EXIT() is not "that fatal" and an error message in the log is enough. +/// +/// A good example is calling CurrentThread::detachQueryIfNotDetached(). +/// +/// An anti-pattern is calling WriteBuffer::finalize() under SCOPE_EXIT_SAFE() +/// (since finalize() can do a final write, and it is better to fail abnormally +/// than to ignore a write error). +/// +/// NOTE: it should be used with double caution. +#define SCOPE_EXIT_SAFE(...) SCOPE_EXIT( \ + try \ + { \ + __VA_ARGS__; \ + } \ + catch (...) \ + { \ + tryLogCurrentException(__PRETTY_FUNCTION__); \ + } \ +) + +/// Same as SCOPE_EXIT() but: +/// - block the MEMORY_LIMIT_EXCEEDED errors, +/// - try/catch/tryLogCurrentException any exceptions. +/// +/// SCOPE_EXIT_MEMORY_SAFE() can be used when the error can be ignored, and in +/// addition to SCOPE_EXIT_SAFE() it will also lock MEMORY_LIMIT_EXCEEDED to +/// avoid such exceptions. +/// +/// It exists as a separate helper, since you do not always need to lock +/// MEMORY_LIMIT_EXCEEDED (there are cases when the code under SCOPE_EXIT does +/// not do any allocations, while LockExceptionInThread increments an atomic +/// variable). +/// +/// NOTE: it should be used with triple caution. +#define SCOPE_EXIT_MEMORY_SAFE(...) SCOPE_EXIT( \ + try \ + { \ + MemoryTracker::LockExceptionInThread \ + lock_memory_tracker(VariableContext::Global); \ + __VA_ARGS__; \ + } \ + catch (...) \ + { \ + tryLogCurrentException(__PRETTY_FUNCTION__); \ + } \ +) diff --git a/base/glibc-compatibility/CMakeLists.txt b/base/glibc-compatibility/CMakeLists.txt index e785e2ab2ce..8cba91de33f 100644 --- a/base/glibc-compatibility/CMakeLists.txt +++ b/base/glibc-compatibility/CMakeLists.txt @@ -15,7 +15,7 @@ if (GLIBC_COMPATIBILITY) add_headers_and_sources(glibc_compatibility .)
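The scope_guard_safe.h header added above is mostly documentation of a trade-off: cleanup code that may itself throw (its own example is CurrentThread::detachQueryIfNotDetached()) should be logged, not allowed to escape. A self-contained miniature of that idea, with editorial stand-in names rather than the ClickHouse implementation:

```cpp
#include <iostream>
#include <stdexcept>
#include <utility>

/// Miniature of SCOPE_EXIT_SAFE: the guard's callback runs in the destructor
/// inside try/catch, so a throwing cleanup is reported instead of propagated.
template <typename F>
class SafeScopeGuard
{
public:
    explicit SafeScopeGuard(F fn_) : fn(std::move(fn_)) {}
    ~SafeScopeGuard()
    {
        try
        {
            fn();
        }
        catch (...)
        {
            std::cerr << "cleanup failed, error logged and ignored\n";  /// stand-in for tryLogCurrentException()
        }
    }
private:
    F fn;
};

int main()
{
    {
        SafeScopeGuard guard([] { throw std::runtime_error("detach failed"); });
        std::cout << "working\n";
    }  /// the guard fires here; the exception is caught, not rethrown
    std::cout << "still alive\n";
}
```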
add_headers_and_sources(glibc_compatibility musl) - if (ARCH_ARM) + if (ARCH_AARCH64) list (APPEND glibc_compatibility_sources musl/aarch64/syscall.s musl/aarch64/longjmp.s) set (musl_arch_include_dir musl/aarch64) elseif (ARCH_AMD64) diff --git a/base/glibc-compatibility/musl/lgamma.c b/base/glibc-compatibility/musl/lgamma.c index fb9d105d0fa..5e959504e29 100644 --- a/base/glibc-compatibility/musl/lgamma.c +++ b/base/glibc-compatibility/musl/lgamma.c @@ -78,6 +78,9 @@ * */ +// Disable warnings by PVS-Studio +//-V::GA + static const double pi = 3.14159265358979311600e+00, /* 0x400921FB, 0x54442D18 */ a0 = 7.72156649015328655494e-02, /* 0x3FB3C467, 0xE37DB0C8 */ diff --git a/base/glibc-compatibility/musl/lgammal.c b/base/glibc-compatibility/musl/lgammal.c index b158748ce1f..775559f13b6 100644 --- a/base/glibc-compatibility/musl/lgammal.c +++ b/base/glibc-compatibility/musl/lgammal.c @@ -85,6 +85,9 @@ * */ +// Disable warnings by PVS-Studio +//-V::GA + #include #include #include "libm.h" diff --git a/base/glibc-compatibility/musl/libm.h b/base/glibc-compatibility/musl/libm.h index 55520c2fb03..e5029318693 100644 --- a/base/glibc-compatibility/musl/libm.h +++ b/base/glibc-compatibility/musl/libm.h @@ -155,7 +155,7 @@ static inline long double fp_barrierl(long double x) static inline void fp_force_evalf(float x) { volatile float y; - y = x; + y = x; //-V1001 } #endif @@ -164,7 +164,7 @@ static inline void fp_force_evalf(float x) static inline void fp_force_eval(double x) { volatile double y; - y = x; + y = x; //-V1001 } #endif @@ -173,7 +173,7 @@ static inline void fp_force_eval(double x) static inline void fp_force_evall(long double x) { volatile long double y; - y = x; + y = x; //-V1001 } #endif diff --git a/base/glibc-compatibility/musl/powf.c b/base/glibc-compatibility/musl/powf.c index de8fab54554..35dc3611b94 100644 --- a/base/glibc-compatibility/musl/powf.c +++ b/base/glibc-compatibility/musl/powf.c @@ -3,6 +3,9 @@ * SPDX-License-Identifier: MIT */ +// Disable warnings by PVS-Studio +//-V::GA + #include #include #include "libm.h" diff --git a/base/loggers/CMakeLists.txt b/base/loggers/CMakeLists.txt index 48868cf1e0d..22be002e069 100644 --- a/base/loggers/CMakeLists.txt +++ b/base/loggers/CMakeLists.txt @@ -1,4 +1,4 @@ -include(${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake) +include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake") add_headers_and_sources(loggers .) 
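The //-V1001 suppressions above mark a deliberate idiom in musl's libm.h rather than a bug: the store through a volatile is the whole point, since it forces the floating-point expression to be evaluated so that its side effects (raised status flags) survive optimization. A hedged standalone sketch of the same idiom (flag behavior is compiler- and platform-dependent):

```cpp
#include <cfenv>
#include <iostream>

/// Stand-in for the musl helper: the volatile store keeps the compiler from
/// discarding the otherwise-unused evaluation of x and its FP side effects.
static void fp_force_eval(double x)
{
    volatile double y;
    y = x;
    (void)y;
}

int main()
{
    volatile double big = 1e308;  /// volatile input defeats constant folding
    std::feclearexcept(FE_ALL_EXCEPT);
    fp_force_eval(big * 1e10);    /// the overflow is actually performed
    std::cout << (std::fetestexcept(FE_OVERFLOW) ? "FE_OVERFLOW raised\n"
                                                 : "no flag raised\n");
}
```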
add_library(loggers ${loggers_sources} ${loggers_headers}) target_link_libraries(loggers PRIVATE dbms clickhouse_common_io) diff --git a/base/loggers/Loggers.cpp b/base/loggers/Loggers.cpp index ed806741895..80e62d0a6d6 100644 --- a/base/loggers/Loggers.cpp +++ b/base/loggers/Loggers.cpp @@ -6,10 +6,11 @@ #include "OwnFormattingChannel.h" #include "OwnPatternFormatter.h" #include -#include #include #include -#include +#include + +namespace fs = std::filesystem; namespace DB { @@ -20,11 +21,11 @@ namespace DB // TODO: move to libcommon static std::string createDirectory(const std::string & file) { - auto path = Poco::Path(file).makeParent(); - if (path.toString().empty()) + auto path = fs::path(file).parent_path(); + if (path.empty()) return ""; - Poco::File(path).createDirectories(); - return path.toString(); + fs::create_directories(path); + return path; }; void Loggers::setTextLog(std::shared_ptr log, int max_priority) @@ -40,7 +41,7 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log split->addTextLog(log, text_log_max_priority); auto current_logger = config.getString("logger", ""); - if (config_logger == current_logger) + if (config_logger == current_logger) //-V1051 return; config_logger = current_logger; @@ -51,16 +52,26 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log /// Use extended interface of Channel for more comprehensive logging. split = new DB::OwnSplitChannel(); - auto log_level = config.getString("logger.level", "trace"); + auto log_level_string = config.getString("logger.level", "trace"); + + /// different channels (log, console, syslog) may have different loglevels configured + /// The maximum (the most verbose) of those will be used as default for Poco loggers + int max_log_level = 0; + const auto log_path = config.getString("logger.log", ""); if (!log_path.empty()) { createDirectory(log_path); - std::cerr << "Logging " << log_level << " to " << log_path << std::endl; + std::cerr << "Logging " << log_level_string << " to " << log_path << std::endl; + auto log_level = Poco::Logger::parseLevel(log_level_string); + if (log_level > max_log_level) + { + max_log_level = log_level; + } // Set up two channel chains. log_file = new Poco::FileChannel; - log_file->setProperty(Poco::FileChannel::PROP_PATH, Poco::Path(log_path).absolute().toString()); + log_file->setProperty(Poco::FileChannel::PROP_PATH, fs::weakly_canonical(log_path)); log_file->setProperty(Poco::FileChannel::PROP_ROTATION, config.getRawString("logger.size", "100M")); log_file->setProperty(Poco::FileChannel::PROP_ARCHIVE, "number"); log_file->setProperty(Poco::FileChannel::PROP_COMPRESS, config.getRawString("logger.compress", "true")); @@ -69,9 +80,10 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log log_file->setProperty(Poco::FileChannel::PROP_ROTATEONOPEN, config.getRawString("logger.rotateOnOpen", "false")); log_file->open(); - Poco::AutoPtr pf = new OwnPatternFormatter(this); + Poco::AutoPtr pf = new OwnPatternFormatter; Poco::AutoPtr log = new DB::OwnFormattingChannel(pf, log_file); + log->setLevel(log_level); split->addChannel(log); } @@ -79,10 +91,19 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log if (!errorlog_path.empty()) { createDirectory(errorlog_path); + + // NOTE: we don't use notice & critical in the code, so in practice error log collects fatal & error & warning. + // (!) Warnings are important, they require attention and should never be silenced / ignored. 
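The Loggers.cpp hunks above replace the single `log_level` with per-channel levels, and the root logger ends up with `max_log_level`, the most verbose level any channel asked for (in Poco's numbering, fatal = 1 up to trace = 8, so the maximum is the most verbose). A compact sketch of that merging rule; the parse table below is a simplified stand-in for Poco::Logger::parseLevel:

```cpp
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

/// Simplified stand-in for Poco::Logger::parseLevel: index == Poco priority.
int parseLevel(const std::string & name)
{
    const std::vector<std::string> names
        = {"none", "fatal", "critical", "error", "warning", "notice", "information", "debug", "trace"};
    const auto it = std::find(names.begin(), names.end(), name);
    return it == names.end() ? 0 : static_cast<int>(it - names.begin());
}

int main()
{
    /// Levels configured for the file log, the error log and the console.
    int max_log_level = 0;
    for (const std::string & channel_level : {"information", "notice", "trace"})
        max_log_level = std::max(max_log_level, parseLevel(channel_level));

    std::cout << max_log_level << '\n';  /// 8: the root logger must pass trace through
}
```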
+ auto errorlog_level = Poco::Logger::parseLevel(config.getString("logger.errorlog_level", "notice")); + if (errorlog_level > max_log_level) + { + max_log_level = errorlog_level; + } + std::cerr << "Logging errors to " << errorlog_path << std::endl; error_log_file = new Poco::FileChannel; - error_log_file->setProperty(Poco::FileChannel::PROP_PATH, Poco::Path(errorlog_path).absolute().toString()); + error_log_file->setProperty(Poco::FileChannel::PROP_PATH, fs::weakly_canonical(errorlog_path)); error_log_file->setProperty(Poco::FileChannel::PROP_ROTATION, config.getRawString("logger.size", "100M")); error_log_file->setProperty(Poco::FileChannel::PROP_ARCHIVE, "number"); error_log_file->setProperty(Poco::FileChannel::PROP_COMPRESS, config.getRawString("logger.compress", "true")); @@ -90,20 +111,22 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log error_log_file->setProperty(Poco::FileChannel::PROP_FLUSH, config.getRawString("logger.flush", "true")); error_log_file->setProperty(Poco::FileChannel::PROP_ROTATEONOPEN, config.getRawString("logger.rotateOnOpen", "false")); - Poco::AutoPtr pf = new OwnPatternFormatter(this); + Poco::AutoPtr pf = new OwnPatternFormatter; Poco::AutoPtr errorlog = new DB::OwnFormattingChannel(pf, error_log_file); - errorlog->setLevel(Poco::Message::PRIO_NOTICE); + errorlog->setLevel(errorlog_level); errorlog->open(); split->addChannel(errorlog); } - /// "dynamic_layer_selection" is needed only for Yandex.Metrika, that share part of ClickHouse code. - /// We don't need this configuration parameter. - - if (config.getBool("logger.use_syslog", false) || config.getBool("dynamic_layer_selection", false)) + if (config.getBool("logger.use_syslog", false)) { //const std::string & cmd_name = commandName(); + auto syslog_level = Poco::Logger::parseLevel(config.getString("logger.syslog_level", log_level_string)); + if (syslog_level > max_log_level) + { + max_log_level = syslog_level; + } if (config.has("logger.syslog.address")) { @@ -127,9 +150,11 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log } syslog_channel->open(); - Poco::AutoPtr pf = new OwnPatternFormatter(this, OwnPatternFormatter::ADD_LAYER_TAG); + Poco::AutoPtr pf = new OwnPatternFormatter; Poco::AutoPtr log = new DB::OwnFormattingChannel(pf, syslog_channel); + log->setLevel(syslog_level); + split->addChannel(log); } @@ -141,9 +166,17 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log { bool color_enabled = config.getBool("logger.color_terminal", color_logs_by_default); - Poco::AutoPtr pf = new OwnPatternFormatter(this, OwnPatternFormatter::ADD_NOTHING, color_enabled); + auto console_log_level_string = config.getString("logger.console_log_level", log_level_string); + auto console_log_level = Poco::Logger::parseLevel(console_log_level_string); + if (console_log_level > max_log_level) + { + max_log_level = console_log_level; + } + + Poco::AutoPtr pf = new OwnPatternFormatter(color_enabled); Poco::AutoPtr log = new DB::OwnFormattingChannel(pf, new Poco::ConsoleChannel); - logger.warning("Logging " + log_level + " to console"); + logger.warning("Logging " + console_log_level_string + " to console"); + log->setLevel(console_log_level); split->addChannel(log); } @@ -152,17 +185,17 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log logger.setChannel(split); // Global logging level (it can be overridden for specific loggers). 
- logger.setLevel(log_level); + logger.setLevel(max_log_level); // Set level to all already created loggers std::vector names; //logger_root = Logger::root(); logger.root().names(names); for (const auto & name : names) - logger.root().get(name).setLevel(log_level); + logger.root().get(name).setLevel(max_log_level); // Attach to the root logger. - logger.root().setLevel(log_level); + logger.root().setLevel(max_log_level); logger.root().setChannel(logger.getChannel()); // Explicitly specified log levels for specific loggers. diff --git a/base/loggers/Loggers.h b/base/loggers/Loggers.h index 9ed75046468..151c1d3566f 100644 --- a/base/loggers/Loggers.h +++ b/base/loggers/Loggers.h @@ -8,6 +8,7 @@ #include #include "OwnSplitChannel.h" + namespace Poco::Util { class AbstractConfiguration; @@ -21,16 +22,8 @@ public: /// Close log files. On next log write files will be reopened. void closeLogs(Poco::Logger & logger); - std::optional getLayer() const - { - return layer; /// layer set in inheritor class BaseDaemonApplication. - } - void setTextLog(std::shared_ptr log, int max_priority); -protected: - std::optional layer; - private: Poco::AutoPtr log_file; Poco::AutoPtr error_log_file; diff --git a/base/loggers/OwnFormattingChannel.h b/base/loggers/OwnFormattingChannel.h index cd2e66279d7..2336dacad04 100644 --- a/base/loggers/OwnFormattingChannel.h +++ b/base/loggers/OwnFormattingChannel.h @@ -22,6 +22,9 @@ public: void setLevel(Poco::Message::Priority priority_) { priority = priority_; } + // Poco::Logger::parseLevel returns ints + void setLevel(int level) { priority = static_cast(level); } + void open() override { if (pChannel) diff --git a/base/loggers/OwnPatternFormatter.cpp b/base/loggers/OwnPatternFormatter.cpp index 029d06ff949..e62039f4a27 100644 --- a/base/loggers/OwnPatternFormatter.cpp +++ b/base/loggers/OwnPatternFormatter.cpp @@ -13,31 +13,18 @@ #include "Loggers.h" -OwnPatternFormatter::OwnPatternFormatter(const Loggers * loggers_, OwnPatternFormatter::Options options_, bool color_) - : Poco::PatternFormatter(""), loggers(loggers_), options(options_), color(color_) +OwnPatternFormatter::OwnPatternFormatter(bool color_) + : Poco::PatternFormatter(""), color(color_) { } -void OwnPatternFormatter::formatExtended(const DB::ExtendedLogMessage & msg_ext, std::string & text) +void OwnPatternFormatter::formatExtended(const DB::ExtendedLogMessage & msg_ext, std::string & text) const { DB::WriteBufferFromString wb(text); const Poco::Message & msg = msg_ext.base; - /// For syslog: tag must be before message and first whitespace. - /// This code is only used in Yandex.Metrika and unneeded in ClickHouse. - if ((options & ADD_LAYER_TAG) && loggers) - { - auto layer = loggers->getLayer(); - if (layer) - { - writeCString("layer[", wb); - DB::writeIntText(*layer, wb); - writeCString("]: ", wb); - } - } - /// Change delimiters in date for compatibility with old logs. DB::writeDateTimeText<'.', ':'>(msg_ext.time_seconds, wb); diff --git a/base/loggers/OwnPatternFormatter.h b/base/loggers/OwnPatternFormatter.h index 4aedcc04637..fba4f0964cb 100644 --- a/base/loggers/OwnPatternFormatter.h +++ b/base/loggers/OwnPatternFormatter.h @@ -24,20 +24,11 @@ class Loggers; class OwnPatternFormatter : public Poco::PatternFormatter { public: - /// ADD_LAYER_TAG is needed only for Yandex.Metrika, that share part of ClickHouse code. 
- enum Options - { - ADD_NOTHING = 0, - ADD_LAYER_TAG = 1 << 0 - }; - - OwnPatternFormatter(const Loggers * loggers_, Options options_ = ADD_NOTHING, bool color_ = false); + OwnPatternFormatter(bool color_ = false); void format(const Poco::Message & msg, std::string & text) override; - void formatExtended(const DB::ExtendedLogMessage & msg_ext, std::string & text); + void formatExtended(const DB::ExtendedLogMessage & msg_ext, std::string & text) const; private: - const Loggers * loggers; - Options options; bool color; }; diff --git a/base/mysqlxx/CMakeLists.txt b/base/mysqlxx/CMakeLists.txt index 849c58a8527..c5230c2b49f 100644 --- a/base/mysqlxx/CMakeLists.txt +++ b/base/mysqlxx/CMakeLists.txt @@ -14,8 +14,8 @@ add_library (mysqlxx target_include_directories (mysqlxx PUBLIC ..) if (USE_INTERNAL_MYSQL_LIBRARY) - target_include_directories (mysqlxx PUBLIC ${ClickHouse_SOURCE_DIR}/contrib/mariadb-connector-c/include) - target_include_directories (mysqlxx PUBLIC ${ClickHouse_BINARY_DIR}/contrib/mariadb-connector-c/include) + target_include_directories (mysqlxx PUBLIC "${ClickHouse_SOURCE_DIR}/contrib/mariadb-connector-c/include") + target_include_directories (mysqlxx PUBLIC "${ClickHouse_BINARY_DIR}/contrib/mariadb-connector-c/include") else () set(PLATFORM_LIBRARIES ${CMAKE_DL_LIBS}) diff --git a/base/mysqlxx/Pool.h b/base/mysqlxx/Pool.h index b6189663f55..530e2c78cf2 100644 --- a/base/mysqlxx/Pool.h +++ b/base/mysqlxx/Pool.h @@ -159,9 +159,9 @@ public: */ Pool(const std::string & db_, const std::string & server_, - const std::string & user_ = "", - const std::string & password_ = "", - unsigned port_ = 0, + const std::string & user_, + const std::string & password_, + unsigned port_, const std::string & socket_ = "", unsigned connect_timeout_ = MYSQLXX_DEFAULT_TIMEOUT, unsigned rw_timeout_ = MYSQLXX_DEFAULT_RW_TIMEOUT, diff --git a/base/mysqlxx/PoolWithFailover.cpp b/base/mysqlxx/PoolWithFailover.cpp index 5e9f70f4ac1..e317ab7f228 100644 --- a/base/mysqlxx/PoolWithFailover.cpp +++ b/base/mysqlxx/PoolWithFailover.cpp @@ -2,7 +2,6 @@ #include #include #include - #include @@ -15,9 +14,12 @@ static bool startsWith(const std::string & s, const char * prefix) using namespace mysqlxx; -PoolWithFailover::PoolWithFailover(const Poco::Util::AbstractConfiguration & config_, - const std::string & config_name_, const unsigned default_connections_, - const unsigned max_connections_, const size_t max_tries_) +PoolWithFailover::PoolWithFailover( + const Poco::Util::AbstractConfiguration & config_, + const std::string & config_name_, + const unsigned default_connections_, + const unsigned max_connections_, + const size_t max_tries_) : max_tries(max_tries_) { shareable = config_.getBool(config_name_ + ".share_connection", false); @@ -59,16 +61,46 @@ PoolWithFailover::PoolWithFailover(const Poco::Util::AbstractConfiguration & con } } -PoolWithFailover::PoolWithFailover(const std::string & config_name_, const unsigned default_connections_, - const unsigned max_connections_, const size_t max_tries_) - : PoolWithFailover{ - Poco::Util::Application::instance().config(), config_name_, - default_connections_, max_connections_, max_tries_} + +PoolWithFailover::PoolWithFailover( + const std::string & config_name_, + const unsigned default_connections_, + const unsigned max_connections_, + const size_t max_tries_) + : PoolWithFailover{Poco::Util::Application::instance().config(), + config_name_, default_connections_, max_connections_, max_tries_} { } + +PoolWithFailover::PoolWithFailover( + const std::string & 
database, + const RemoteDescription & addresses, + const std::string & user, + const std::string & password, + unsigned default_connections_, + unsigned max_connections_, + size_t max_tries_) + : max_tries(max_tries_) + , shareable(false) +{ + /// Replicas have the same priority, but traversed replicas are moved to the end of the queue. + for (const auto & [host, port] : addresses) + { + replicas_by_priority[0].emplace_back(std::make_shared(database, + host, user, password, port, + /* socket_ = */ "", + MYSQLXX_DEFAULT_TIMEOUT, + MYSQLXX_DEFAULT_RW_TIMEOUT, + default_connections_, + max_connections_)); + } +} + + PoolWithFailover::PoolWithFailover(const PoolWithFailover & other) - : max_tries{other.max_tries}, shareable{other.shareable} + : max_tries{other.max_tries} + , shareable{other.shareable} { if (shareable) { diff --git a/base/mysqlxx/PoolWithFailover.h b/base/mysqlxx/PoolWithFailover.h index 029fc3ebad3..1c7a63e76c0 100644 --- a/base/mysqlxx/PoolWithFailover.h +++ b/base/mysqlxx/PoolWithFailover.h @@ -11,6 +11,8 @@ namespace mysqlxx { /** MySQL connection pool with support of failover. + * + * For dictionary source: * Have information about replicas and their priorities. * Tries to connect to replica in an order of priority. When equal priority, choose replica with maximum time without connections. * @@ -68,42 +70,60 @@ namespace mysqlxx using PoolPtr = std::shared_ptr; using Replicas = std::vector; - /// [priority][index] -> replica. + /// [priority][index] -> replica. Highest priority is 0. using ReplicasByPriority = std::map; - ReplicasByPriority replicas_by_priority; /// Number of connection tries. size_t max_tries; /// Mutex for set of replicas. std::mutex mutex; - /// Can the Pool be shared bool shareable; public: using Entry = Pool::Entry; + using RemoteDescription = std::vector>; /** - * config_name Name of parameter in configuration file. + * * MySQL dictionary source related params: + * config_name Name of parameter in configuration file for dictionary source. + * + * * MySQL storage related parameters: + * replicas_description + * + * * Mutual parameters: * default_connections Number of connection in pool to each replica at start. * max_connections Maximum number of connections in pool to each replica. * max_tries_ Max number of connection tries. */ - PoolWithFailover(const std::string & config_name_, + PoolWithFailover( + const std::string & config_name_, unsigned default_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS, unsigned max_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS, size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES); - PoolWithFailover(const Poco::Util::AbstractConfiguration & config_, + PoolWithFailover( + const Poco::Util::AbstractConfiguration & config_, + const std::string & config_name_, unsigned default_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS, unsigned max_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS, size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES); + PoolWithFailover( + const std::string & database, + const RemoteDescription & addresses, + const std::string & user, + const std::string & password, + unsigned default_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS, + unsigned max_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS, + size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES); + PoolWithFailover(const PoolWithFailover & other); /** Allocates a connection to use.
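 *
 * (An editorial usage sketch of the replica-aware constructor added above;
 * the hosts and credentials are illustrative only:
 *
 *     PoolWithFailover::RemoteDescription addresses{{"host1", 3306}, {"host2", 3306}};
 *     PoolWithFailover pool("db", addresses, "user", "password");
 *     PoolWithFailover::Entry entry = pool.get();
 *
 * All such replicas share priority 0, and get() tries them in order, moving
 * a traversed replica to the end of the queue.)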
*/ Entry get(); }; + + using PoolWithFailoverPtr = std::shared_ptr; } diff --git a/base/pcg-random/pcg_extras.hpp b/base/pcg-random/pcg_extras.hpp index b71e859a25f..39c91c4ecfa 100644 --- a/base/pcg-random/pcg_extras.hpp +++ b/base/pcg-random/pcg_extras.hpp @@ -447,69 +447,6 @@ inline SrcIter uneven_copy(SrcIter src_first, std::integral_constant{}); } -/* generate_to, fill in a fixed-size array of integral type using a SeedSeq - * (actually works for any random-access iterator) - */ - -template -inline void generate_to_impl(SeedSeq&& generator, DestIter dest, - std::true_type) -{ - generator.generate(dest, dest+size); -} - -template -void generate_to_impl(SeedSeq&& generator, DestIter dest, - std::false_type) -{ - typedef typename std::iterator_traits::value_type dest_t; - constexpr auto DEST_SIZE = sizeof(dest_t); - constexpr auto GEN_SIZE = sizeof(uint32_t); - - constexpr bool GEN_IS_SMALLER = GEN_SIZE < DEST_SIZE; - constexpr size_t FROM_ELEMS = - GEN_IS_SMALLER - ? size * ((DEST_SIZE+GEN_SIZE-1) / GEN_SIZE) - : (size + (GEN_SIZE / DEST_SIZE) - 1) - / ((GEN_SIZE / DEST_SIZE) + GEN_IS_SMALLER); - // this odd code ^^^^^^^^^^^^^^^^^ is work-around for - // a bug: http://llvm.org/bugs/show_bug.cgi?id=21287 - - if (FROM_ELEMS <= 1024) { - uint32_t buffer[FROM_ELEMS]; - generator.generate(buffer, buffer+FROM_ELEMS); - uneven_copy(buffer, dest, dest+size); - } else { - uint32_t* buffer = static_cast(malloc(GEN_SIZE * FROM_ELEMS)); - generator.generate(buffer, buffer+FROM_ELEMS); - uneven_copy(buffer, dest, dest+size); - free(static_cast(buffer)); - } -} - -template -inline void generate_to(SeedSeq&& generator, DestIter dest) -{ - typedef typename std::iterator_traits::value_type dest_t; - constexpr bool IS_32BIT = sizeof(dest_t) == sizeof(uint32_t); - - generate_to_impl(std::forward(generator), dest, - std::integral_constant{}); -} - -/* generate_one, produce a value of integral type using a SeedSeq - * (optionally, we can have it produce more than one and pick which one - * we want) - */ - -template -inline UInt generate_one(SeedSeq&& generator) -{ - UInt result[N]; - generate_to(std::forward(generator), result); - return result[i]; -} - template auto bounded_rand(RngType& rng, typename RngType::result_type upper_bound) -> typename RngType::result_type @@ -517,7 +454,7 @@ auto bounded_rand(RngType& rng, typename RngType::result_type upper_bound) typedef typename RngType::result_type rtype; rtype threshold = (RngType::max() - RngType::min() + rtype(1) - upper_bound) % upper_bound; - for (;;) { + for (;;) { //-V1044 rtype r = rng() - RngType::min(); if (r >= threshold) return r % upper_bound; diff --git a/base/pcg-random/pcg_random.hpp b/base/pcg-random/pcg_random.hpp index abf83a60ee1..d9d3519a4cf 100644 --- a/base/pcg-random/pcg_random.hpp +++ b/base/pcg-random/pcg_random.hpp @@ -928,7 +928,7 @@ struct rxs_m_xs_mixin { constexpr bitcount_t shift = bits - xtypebits; constexpr bitcount_t mask = (1 << opbits) - 1; bitcount_t rshift = - opbits ? bitcount_t(internal >> (bits - opbits)) & mask : 0; + opbits ? bitcount_t(internal >> (bits - opbits)) & mask : 0; //-V547 internal ^= internal >> (opbits + rshift); internal *= mcg_multiplier::multiplier(); xtype result = internal >> shift; @@ -950,7 +950,7 @@ struct rxs_m_xs_mixin { internal *= mcg_unmultiplier::unmultiplier(); - bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; + bitcount_t rshift = opbits ? 
(internal >> (bits - opbits)) & mask : 0; //-V547 internal = unxorshift(internal, bits, opbits + rshift); return internal; @@ -975,7 +975,7 @@ struct rxs_m_mixin { : 2; constexpr bitcount_t shift = bits - xtypebits; constexpr bitcount_t mask = (1 << opbits) - 1; - bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; + bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; //-V547 internal ^= internal >> (opbits + rshift); internal *= mcg_multiplier::multiplier(); xtype result = internal >> shift; @@ -1366,7 +1366,7 @@ void extended::selfinit() // - any strange correlations would only be apparent if we // were to backstep the generator so that the base generator // was generating the same values again - result_type xdiff = baseclass::operator()() - baseclass::operator()(); + result_type xdiff = baseclass::operator()() - baseclass::operator()(); //-V501 for (size_t i = 0; i < table_size; ++i) { data_[i] = baseclass::operator()() ^ xdiff; } @@ -1643,22 +1643,22 @@ typedef setseq_base template -using ext_std8 = extended; template -using ext_std16 = extended; template -using ext_std32 = extended; template -using ext_std64 = extended; diff --git a/cmake/arch.cmake b/cmake/arch.cmake index 9604ef62b31..60e0346dbbf 100644 --- a/cmake/arch.cmake +++ b/cmake/arch.cmake @@ -1,7 +1,7 @@ if (CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64") set (ARCH_AMD64 1) endif () -if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*)") +if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*|arm64.*|ARM64.*)") set (ARCH_AARCH64 1) endif () if (ARCH_AARCH64 OR CMAKE_SYSTEM_PROCESSOR MATCHES "arm") diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt index bd7885bc41b..34de50e9f8a 100644 --- a/cmake/autogenerated_versions.txt +++ b/cmake/autogenerated_versions.txt @@ -1,9 +1,9 @@ # This strings autochanged from release_lib.sh: -SET(VERSION_REVISION 54449) +SET(VERSION_REVISION 54452) SET(VERSION_MAJOR 21) -SET(VERSION_MINOR 4) +SET(VERSION_MINOR 7) SET(VERSION_PATCH 1) -SET(VERSION_GITHASH af2135ef9dc72f16fa4f229b731262c3f0a8bbdc) -SET(VERSION_DESCRIBE v21.4.1.1-prestable) -SET(VERSION_STRING 21.4.1.1) +SET(VERSION_GITHASH 976ccc2e908ac3bc28f763bfea8134ea0a121b40) +SET(VERSION_DESCRIBE v21.7.1.1-prestable) +SET(VERSION_STRING 21.7.1.1) # end of autochange diff --git a/cmake/check_flags.cmake b/cmake/check_flags.cmake new file mode 100644 index 00000000000..5a4ff472868 --- /dev/null +++ b/cmake/check_flags.cmake @@ -0,0 +1,6 @@ +include (CheckCXXCompilerFlag) +include (CheckCCompilerFlag) + +check_cxx_compiler_flag("-Wsuggest-destructor-override" HAS_SUGGEST_DESTRUCTOR_OVERRIDE) +check_cxx_compiler_flag("-Wshadow" HAS_SHADOW) +check_cxx_compiler_flag("-Wsuggest-override" HAS_SUGGEST_OVERRIDE) diff --git a/cmake/darwin/default_libs.cmake b/cmake/darwin/default_libs.cmake index 79ac675f234..a6ee800d59b 100644 --- a/cmake/darwin/default_libs.cmake +++ b/cmake/darwin/default_libs.cmake @@ -1,11 +1,14 @@ set (DEFAULT_LIBS "-nodefaultlibs") -if (NOT COMPILER_CLANG) - message (FATAL_ERROR "Darwin build is supported only for Clang") -endif () - set (DEFAULT_LIBS "${DEFAULT_LIBS} ${COVERAGE_OPTION} -lc -lm -lpthread -ldl") +if (COMPILER_GCC) + set (DEFAULT_LIBS "${DEFAULT_LIBS} -lgcc_eh") + if (ARCH_AARCH64) + set (DEFAULT_LIBS "${DEFAULT_LIBS} -lgcc") + endif () +endif () + message(STATUS "Default libraries: ${DEFAULT_LIBS}") set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS}) diff --git a/cmake/darwin/toolchain-aarch64.cmake 
b/cmake/darwin/toolchain-aarch64.cmake new file mode 100644 index 00000000000..81398111495 --- /dev/null +++ b/cmake/darwin/toolchain-aarch64.cmake @@ -0,0 +1,14 @@ +set (CMAKE_SYSTEM_NAME "Darwin") +set (CMAKE_SYSTEM_PROCESSOR "aarch64") +set (CMAKE_C_COMPILER_TARGET "aarch64-apple-darwin") +set (CMAKE_CXX_COMPILER_TARGET "aarch64-apple-darwin") +set (CMAKE_ASM_COMPILER_TARGET "aarch64-apple-darwin") +set (CMAKE_OSX_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/darwin-aarch64") + +set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake + +set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) +set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) + +set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) +set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/find/amqpcpp.cmake b/cmake/find/amqpcpp.cmake index 4191dce26bb..a4a58349508 100644 --- a/cmake/find/amqpcpp.cmake +++ b/cmake/find/amqpcpp.cmake @@ -1,3 +1,8 @@ +if (MISSING_INTERNAL_LIBUV_LIBRARY) + message (WARNING "Can't find internal libuv needed for AMQP-CPP library") + set (ENABLE_AMQPCPP OFF CACHE INTERNAL "") +endif() + option(ENABLE_AMQPCPP "Enalbe AMQP-CPP" ${ENABLE_LIBRARIES}) if (NOT ENABLE_AMQPCPP) @@ -12,11 +17,13 @@ if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP/CMakeLists.txt") endif () set (USE_AMQPCPP 1) -set (AMQPCPP_LIBRARY AMQP-CPP) +set (AMQPCPP_LIBRARY amqp-cpp) set (AMQPCPP_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP/include") list (APPEND AMQPCPP_INCLUDE_DIR - "${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP/include" + "${LIBUV_INCLUDE_DIR}" "${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP") +list (APPEND AMQPCPP_LIBRARY "${LIBUV_LIBRARY}") + message (STATUS "Using AMQP-CPP=${USE_AMQPCPP}: ${AMQPCPP_INCLUDE_DIR} : ${AMQPCPP_LIBRARY}") diff --git a/cmake/find/base64.cmake b/cmake/find/base64.cmake index 7427baf9cad..acade11eb2f 100644 --- a/cmake/find/base64.cmake +++ b/cmake/find/base64.cmake @@ -1,4 +1,8 @@ -option (ENABLE_BASE64 "Enable base64" ${ENABLE_LIBRARIES}) +if(ARCH_AMD64 OR ARCH_ARM) + option (ENABLE_BASE64 "Enable base64" ${ENABLE_LIBRARIES}) +elseif(ENABLE_BASE64) + message (${RECONFIGURE_MESSAGE_LEVEL} "base64 library is only supported on x86_64 and aarch64") +endif() if (NOT ENABLE_BASE64) return() diff --git a/cmake/find/cassandra.cmake b/cmake/find/cassandra.cmake index 037d6c3f131..b6e97ff5ef8 100644 --- a/cmake/find/cassandra.cmake +++ b/cmake/find/cassandra.cmake @@ -1,3 +1,8 @@ +if (MISSING_INTERNAL_LIBUV_LIBRARY) + message (WARNING "Disabling cassandra due to missing libuv") + set (ENABLE_CASSANDRA OFF CACHE INTERNAL "") +endif() + option(ENABLE_CASSANDRA "Enable Cassandra" ${ENABLE_LIBRARIES}) if (NOT ENABLE_CASSANDRA) @@ -8,27 +13,22 @@ if (APPLE) set(CMAKE_MACOSX_RPATH ON) endif() -if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libuv") - message (ERROR "submodule contrib/libuv is missing. to fix try run: \n git submodule update --init --recursive") - message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal libuv needed for Cassandra") -elseif (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/cassandra") +if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/cassandra") message (ERROR "submodule contrib/cassandra is missing. 
to fix try run: \n git submodule update --init --recursive") message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal Cassandra") -else() - set (LIBUV_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/libuv") - set (CASSANDRA_INCLUDE_DIR - "${ClickHouse_SOURCE_DIR}/contrib/cassandra/include/") - if (MAKE_STATIC_LIBRARIES) - set (LIBUV_LIBRARY uv_a) - set (CASSANDRA_LIBRARY cassandra_static) - else() - set (LIBUV_LIBRARY uv) - set (CASSANDRA_LIBRARY cassandra) - endif() - - set (USE_CASSANDRA 1) - set (CASS_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/cassandra") + set (USE_CASSANDRA 0) + return() endif() +set (USE_CASSANDRA 1) +set (CASSANDRA_INCLUDE_DIR + "${ClickHouse_SOURCE_DIR}/contrib/cassandra/include/") +if (MAKE_STATIC_LIBRARIES) + set (CASSANDRA_LIBRARY cassandra_static) +else() + set (CASSANDRA_LIBRARY cassandra) +endif() + +set (CASS_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/cassandra") + message (STATUS "Using cassandra=${USE_CASSANDRA}: ${CASSANDRA_INCLUDE_DIR} : ${CASSANDRA_LIBRARY}") -message (STATUS "Using libuv: ${LIBUV_ROOT_DIR} : ${LIBUV_LIBRARY}") diff --git a/cmake/find/ccache.cmake b/cmake/find/ccache.cmake index fea1f8b4c97..986c9cb5fe2 100644 --- a/cmake/find/ccache.cmake +++ b/cmake/find/ccache.cmake @@ -32,7 +32,9 @@ if (CCACHE_FOUND AND NOT COMPILER_MATCHES_CCACHE) if (CCACHE_VERSION VERSION_GREATER "3.2.0" OR NOT CMAKE_CXX_COMPILER_ID STREQUAL "Clang") message(STATUS "Using ${CCACHE_FOUND} ${CCACHE_VERSION}") - set_property (GLOBAL PROPERTY RULE_LAUNCH_COMPILE ${CCACHE_FOUND}) + set (CMAKE_CXX_COMPILER_LAUNCHER ${CCACHE_FOUND} ${CMAKE_CXX_COMPILER_LAUNCHER}) + set (CMAKE_C_COMPILER_LAUNCHER ${CCACHE_FOUND} ${CMAKE_C_COMPILER_LAUNCHER}) + set_property (GLOBAL PROPERTY RULE_LAUNCH_LINK ${CCACHE_FOUND}) # debian (debhelpers) set SOURCE_DATE_EPOCH environment variable, that is diff --git a/cmake/find/datasketches.cmake b/cmake/find/datasketches.cmake new file mode 100644 index 00000000000..44ef324a9f2 --- /dev/null +++ b/cmake/find/datasketches.cmake @@ -0,0 +1,29 @@ +option (ENABLE_DATASKETCHES "Enable DataSketches" ${ENABLE_LIBRARIES}) + +if (ENABLE_DATASKETCHES) + +option (USE_INTERNAL_DATASKETCHES_LIBRARY "Set to FALSE to use system DataSketches library instead of bundled" ${NOT_UNBUNDLED}) + +if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/theta/CMakeLists.txt") + if (USE_INTERNAL_DATASKETCHES_LIBRARY) + message(WARNING "submodule contrib/datasketches-cpp is missing. 
to fix try run: \n git submodule update --init --recursive") + endif() + set(MISSING_INTERNAL_DATASKETCHES_LIBRARY 1) + set(USE_INTERNAL_DATASKETCHES_LIBRARY 0) +endif() + +if (USE_INTERNAL_DATASKETCHES_LIBRARY) + set(DATASKETCHES_LIBRARY theta) + set(DATASKETCHES_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/common/include" "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/theta/include") +elseif (NOT MISSING_INTERNAL_DATASKETCHES_LIBRARY) + find_library(DATASKETCHES_LIBRARY theta) + find_path(DATASKETCHES_INCLUDE_DIR NAMES theta_sketch.hpp PATHS ${DATASKETCHES_INCLUDE_PATHS}) +endif() + +if (DATASKETCHES_LIBRARY AND DATASKETCHES_INCLUDE_DIR) + set(USE_DATASKETCHES 1) +endif() + +endif() + +message (STATUS "Using datasketches=${USE_DATASKETCHES}: ${DATASKETCHES_INCLUDE_DIR} : ${DATASKETCHES_LIBRARY}") diff --git a/cmake/find/fastops.cmake b/cmake/find/fastops.cmake index 5ab320bdb7a..1675646654e 100644 --- a/cmake/find/fastops.cmake +++ b/cmake/find/fastops.cmake @@ -1,7 +1,7 @@ -if(NOT ARCH_ARM AND NOT OS_FREEBSD AND NOT OS_DARWIN) +if(ARCH_AMD64 AND NOT OS_FREEBSD AND NOT OS_DARWIN) option(ENABLE_FASTOPS "Enable fast vectorized mathematical functions library by Mikhail Parakhin" ${ENABLE_LIBRARIES}) elseif(ENABLE_FASTOPS) - message (${RECONFIGURE_MESSAGE_LEVEL} "Fastops library is not supported on ARM, FreeBSD and Darwin") + message (${RECONFIGURE_MESSAGE_LEVEL} "Fastops library is supported on x86_64 only, and not FreeBSD or Darwin") endif() if(NOT ENABLE_FASTOPS) diff --git a/cmake/find/hdfs3.cmake b/cmake/find/hdfs3.cmake index 7b385f24e1e..3aab2b612ef 100644 --- a/cmake/find/hdfs3.cmake +++ b/cmake/find/hdfs3.cmake @@ -1,4 +1,4 @@ -if(NOT ARCH_ARM AND NOT OS_FREEBSD AND NOT APPLE AND USE_PROTOBUF) +if(NOT ARCH_ARM AND NOT OS_FREEBSD AND NOT APPLE AND USE_PROTOBUF AND NOT ARCH_PPC64LE) option(ENABLE_HDFS "Enable HDFS" ${ENABLE_LIBRARIES}) elseif(ENABLE_HDFS OR USE_INTERNAL_HDFS3_LIBRARY) message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot use HDFS3 with current configuration") diff --git a/cmake/find/ldap.cmake b/cmake/find/ldap.cmake index 369c1e42e8d..d8baea89429 100644 --- a/cmake/find/ldap.cmake +++ b/cmake/find/ldap.cmake @@ -62,8 +62,10 @@ if (NOT OPENLDAP_FOUND AND NOT MISSING_INTERNAL_LDAP_LIBRARY) if ( ( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "x86_64" ) OR ( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "aarch64" ) OR + ( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "ppc64le" ) OR ( "${_system_name}" STREQUAL "freebsd" AND "${_system_processor}" STREQUAL "x86_64" ) OR - ( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "x86_64" ) + ( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "x86_64" ) OR + ( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "aarch64" ) ) set (_ldap_supported_platform TRUE) endif () diff --git a/cmake/find/libuv.cmake b/cmake/find/libuv.cmake new file mode 100644 index 00000000000..f0023209309 --- /dev/null +++ b/cmake/find/libuv.cmake @@ -0,0 +1,22 @@ +if (OS_DARWIN AND COMPILER_GCC) + message (WARNING "libuv cannot be built with GCC in macOS due to a bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93082") + SET(MISSING_INTERNAL_LIBUV_LIBRARY 1) + return() +endif() + +if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libuv") + message (WARNING "submodule contrib/libuv is missing. 
to fix try run: \n git submodule update --init --recursive") + SET(MISSING_INTERNAL_LIBUV_LIBRARY 1) + return() +endif() + +if (MAKE_STATIC_LIBRARIES) + set (LIBUV_LIBRARY uv_a) +else() + set (LIBUV_LIBRARY uv) +endif() + +set (LIBUV_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/libuv") +set (LIBUV_INCLUDE_DIR "${LIBUV_ROOT_DIR}/include") + +message (STATUS "Using libuv: ${LIBUV_ROOT_DIR} : ${LIBUV_LIBRARY}") diff --git a/cmake/find/llvm.cmake b/cmake/find/llvm.cmake index e0ba1d9b039..816164bef10 100644 --- a/cmake/find/llvm.cmake +++ b/cmake/find/llvm.cmake @@ -1,108 +1,39 @@ -if (APPLE OR SPLIT_SHARED_LIBRARIES OR NOT ARCH_AMD64) +if (APPLE OR SPLIT_SHARED_LIBRARIES OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined") set (ENABLE_EMBEDDED_COMPILER OFF CACHE INTERNAL "") endif() -option (ENABLE_EMBEDDED_COMPILER "Set to TRUE to enable support for 'compile_expressions' option for query execution" ${ENABLE_LIBRARIES}) -# Broken in macos. TODO: update clang, re-test, enable on Apple -if (ENABLE_EMBEDDED_COMPILER AND NOT SPLIT_SHARED_LIBRARIES AND ARCH_AMD64 AND NOT (SANITIZE STREQUAL "undefined")) - option (USE_INTERNAL_LLVM_LIBRARY "Use bundled or system LLVM library." ${NOT_UNBUNDLED}) -endif() +option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ON) if (NOT ENABLE_EMBEDDED_COMPILER) - if(USE_INTERNAL_LLVM_LIBRARY) - message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot use internal LLVM library with ENABLE_EMBEDDED_COMPILER=OFF") - endif() + set (USE_EMBEDDED_COMPILER 0) return() endif() if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/llvm/llvm/CMakeLists.txt") - if (USE_INTERNAL_LLVM_LIBRARY) - message (WARNING "submodule contrib/llvm is missing. to fix try run: \n git submodule update --init --recursive") - message (${RECONFIGURE_MESSAGE_LEVEL} "Can't fidd internal LLVM library") - endif() - set (MISSING_INTERNAL_LLVM_LIBRARY 1) + message (${RECONFIGURE_MESSAGE_LEVEL} "submodule /contrib/llvm is missing. to fix try run: \n git submodule update --init --recursive") endif () -if (NOT USE_INTERNAL_LLVM_LIBRARY) - set (LLVM_PATHS "/usr/local/lib/llvm") +set (USE_EMBEDDED_COMPILER 1) - foreach(llvm_v 10 9 8) - if (NOT LLVM_FOUND) - find_package (LLVM ${llvm_v} CONFIG PATHS ${LLVM_PATHS}) - endif () - endforeach () +set (LLVM_FOUND 1) +set (LLVM_VERSION "12.0.0bundled") +set (LLVM_INCLUDE_DIRS + "${ClickHouse_SOURCE_DIR}/contrib/llvm/llvm/include" + "${ClickHouse_BINARY_DIR}/contrib/llvm/llvm/include" +) +set (LLVM_LIBRARY_DIRS "${ClickHouse_BINARY_DIR}/contrib/llvm/llvm") - if (LLVM_FOUND) - # Remove dynamically-linked zlib and libedit from LLVM's dependencies: - set_target_properties(LLVMSupport PROPERTIES INTERFACE_LINK_LIBRARIES "-lpthread;LLVMDemangle;${ZLIB_LIBRARIES}") - set_target_properties(LLVMLineEditor PROPERTIES INTERFACE_LINK_LIBRARIES "LLVMSupport") - - option(LLVM_HAS_RTTI "Enable if LLVM was build with RTTI enabled" ON) - set (USE_EMBEDDED_COMPILER 1) - else() - message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find system LLVM") - set (USE_EMBEDDED_COMPILER 0) - endif() - - if (LLVM_FOUND AND OS_LINUX AND USE_LIBCXX AND NOT FORCE_LLVM_WITH_LIBCXX) - message(WARNING "Option USE_INTERNAL_LLVM_LIBRARY is not set but the LLVM library from OS packages " - "in Linux is incompatible with libc++ ABI. LLVM Will be disabled. 
Force: -DFORCE_LLVM_WITH_LIBCXX=ON") - message (${RECONFIGURE_MESSAGE_LEVEL} "Unsupported LLVM configuration, cannot enable LLVM") - set (LLVM_FOUND 0) - set (USE_EMBEDDED_COMPILER 0) - endif () -endif() - -if(NOT LLVM_FOUND AND NOT MISSING_INTERNAL_LLVM_LIBRARY) - if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_CURRENT_BINARY_DIR) - message(WARNING "Option ENABLE_EMBEDDED_COMPILER is set but internal LLVM library cannot build if build directory is the same as source directory.") - set (LLVM_FOUND 0) - set (USE_EMBEDDED_COMPILER 0) - elseif (SPLIT_SHARED_LIBRARIES) - # llvm-tablegen cannot find shared libraries that we build. Probably can be easily fixed. - message(WARNING "Option USE_INTERNAL_LLVM_LIBRARY is not compatible with SPLIT_SHARED_LIBRARIES. Build of LLVM will be disabled.") - set (LLVM_FOUND 0) - set (USE_EMBEDDED_COMPILER 0) - elseif (NOT ARCH_AMD64) - # It's not supported yet, but you can help. - message(WARNING "Option USE_INTERNAL_LLVM_LIBRARY is only available for x86_64. Build of LLVM will be disabled.") - set (LLVM_FOUND 0) - set (USE_EMBEDDED_COMPILER 0) - elseif (SANITIZE STREQUAL "undefined") - # llvm-tblgen, that is used during LLVM build, doesn't work with UBSan. - message(WARNING "Option USE_INTERNAL_LLVM_LIBRARY does not work with UBSan, because 'llvm-tblgen' tool from LLVM has undefined behaviour. Build of LLVM will be disabled.") - set (LLVM_FOUND 0) - set (USE_EMBEDDED_COMPILER 0) - else () - set (USE_INTERNAL_LLVM_LIBRARY ON) - set (LLVM_FOUND 1) - set (USE_EMBEDDED_COMPILER 1) - set (LLVM_VERSION "9.0.0bundled") - set (LLVM_INCLUDE_DIRS - "${ClickHouse_SOURCE_DIR}/contrib/llvm/llvm/include" - "${ClickHouse_BINARY_DIR}/contrib/llvm/llvm/include" - ) - set (LLVM_LIBRARY_DIRS "${ClickHouse_BINARY_DIR}/contrib/llvm/llvm") - endif() -endif() - -if (LLVM_FOUND) - message(STATUS "LLVM include Directory: ${LLVM_INCLUDE_DIRS}") - message(STATUS "LLVM library Directory: ${LLVM_LIBRARY_DIRS}") - message(STATUS "LLVM C++ compiler flags: ${LLVM_CXXFLAGS}") -else() - message (${RECONFIGURE_MESSAGE_LEVEL} "Can't enable LLVM") -endif() +message(STATUS "LLVM include Directory: ${LLVM_INCLUDE_DIRS}") +message(STATUS "LLVM library Directory: ${LLVM_LIBRARY_DIRS}") +message(STATUS "LLVM C++ compiler flags: ${LLVM_CXXFLAGS}") # This list was generated by listing all LLVM libraries, compiling the binary and removing all libraries while it still compiles. set (REQUIRED_LLVM_LIBRARIES -LLVMOrcJIT LLVMExecutionEngine LLVMRuntimeDyld LLVMX86CodeGen LLVMX86Desc LLVMX86Info -LLVMX86Utils LLVMAsmPrinter LLVMDebugInfoDWARF LLVMGlobalISel diff --git a/cmake/find/nanodbc.cmake b/cmake/find/nanodbc.cmake new file mode 100644 index 00000000000..894a2a60bad --- /dev/null +++ b/cmake/find/nanodbc.cmake @@ -0,0 +1,16 @@ +if (NOT ENABLE_ODBC) + return () +endif () + +if (NOT USE_INTERNAL_NANODBC_LIBRARY) + message (FATAL_ERROR "Only the bundled nanodbc library can be used") +endif () + +if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/nanodbc/CMakeLists.txt") + message (FATAL_ERROR "submodule contrib/nanodbc is missing. 
to fix try run: \n git submodule update --init --recursive") +endif() + +set (NANODBC_LIBRARY nanodbc) +set (NANODBC_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/nanodbc/nanodbc") + +message (STATUS "Using nanodbc: ${NANODBC_INCLUDE_DIR} : ${NANODBC_LIBRARY}") diff --git a/cmake/find/nuraft.cmake b/cmake/find/nuraft.cmake index 7fa5251946e..4e5258e132f 100644 --- a/cmake/find/nuraft.cmake +++ b/cmake/find/nuraft.cmake @@ -11,7 +11,7 @@ if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/NuRaft/CMakeLists.txt") return() endif () -if (NOT OS_FREEBSD AND NOT OS_DARWIN) +if (NOT OS_FREEBSD) set (USE_NURAFT 1) set (NURAFT_LIBRARY nuraft) diff --git a/cmake/find/odbc.cmake b/cmake/find/odbc.cmake index a23f0c831e9..c475e600c0d 100644 --- a/cmake/find/odbc.cmake +++ b/cmake/find/odbc.cmake @@ -50,4 +50,6 @@ if (NOT EXTERNAL_ODBC_LIBRARY_FOUND) set (USE_INTERNAL_ODBC_LIBRARY 1) endif () +set (USE_INTERNAL_NANODBC_LIBRARY 1) + message (STATUS "Using unixodbc") diff --git a/cmake/find/rocksdb.cmake b/cmake/find/rocksdb.cmake index 968cdb52407..94278a603d7 100644 --- a/cmake/find/rocksdb.cmake +++ b/cmake/find/rocksdb.cmake @@ -1,3 +1,7 @@ +if (OS_DARWIN AND ARCH_AARCH64) + set (ENABLE_ROCKSDB OFF CACHE INTERNAL "") +endif() + option(ENABLE_ROCKSDB "Enable ROCKSDB" ${ENABLE_LIBRARIES}) if (NOT ENABLE_ROCKSDB) diff --git a/cmake/find/s3.cmake b/cmake/find/s3.cmake index 1bbf48fd6b0..1b0c652a31a 100644 --- a/cmake/find/s3.cmake +++ b/cmake/find/s3.cmake @@ -1,7 +1,7 @@ -if(NOT OS_FREEBSD AND NOT APPLE AND NOT ARCH_ARM) +if(NOT OS_FREEBSD AND NOT APPLE) option(ENABLE_S3 "Enable S3" ${ENABLE_LIBRARIES}) elseif(ENABLE_S3 OR USE_INTERNAL_AWS_S3_LIBRARY) - message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use S3 on ARM, Apple or FreeBSD") + message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use S3 on Apple or FreeBSD") endif() if(NOT ENABLE_S3) diff --git a/cmake/find/termcap.cmake b/cmake/find/termcap.cmake deleted file mode 100644 index 58454165785..00000000000 --- a/cmake/find/termcap.cmake +++ /dev/null @@ -1,17 +0,0 @@ -if (ENABLE_EMBEDDED_COMPILER AND NOT USE_INTERNAL_LLVM_LIBRARY AND USE_STATIC_LIBRARIES) - find_library (TERMCAP_LIBRARY tinfo) - if (NOT TERMCAP_LIBRARY) - find_library (TERMCAP_LIBRARY ncurses) - endif() - if (NOT TERMCAP_LIBRARY) - find_library (TERMCAP_LIBRARY termcap) - endif() - - if (NOT TERMCAP_LIBRARY) - message (FATAL_ERROR "Statically Linking external LLVM requires termcap") - endif() - - target_link_libraries(LLVMSupport INTERFACE ${TERMCAP_LIBRARY}) - - message (STATUS "Using termcap: ${TERMCAP_LIBRARY}") -endif() diff --git a/cmake/find/xz.cmake b/cmake/find/xz.cmake new file mode 100644 index 00000000000..0d19859c6b1 --- /dev/null +++ b/cmake/find/xz.cmake @@ -0,0 +1,27 @@ +option (USE_INTERNAL_XZ_LIBRARY "Set to OFF to use system xz (lzma) library instead of bundled" ${NOT_UNBUNDLED}) + +if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/xz/src/liblzma/api/lzma.h") + if(USE_INTERNAL_XZ_LIBRARY) + message(WARNING "submodule contrib/xz is missing. 
to fix try run: \n git submodule update --init --recursive") + message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal xz (lzma) library") + set(USE_INTERNAL_XZ_LIBRARY 0) + endif() + set(MISSING_INTERNAL_XZ_LIBRARY 1) +endif() + +if (NOT USE_INTERNAL_XZ_LIBRARY) + find_library (XZ_LIBRARY lzma) + find_path (XZ_INCLUDE_DIR NAMES lzma.h PATHS ${XZ_INCLUDE_PATHS}) + if (NOT XZ_LIBRARY OR NOT XZ_INCLUDE_DIR) + message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find system xz (lzma) library") + endif () +endif () + +if (XZ_LIBRARY AND XZ_INCLUDE_DIR) +elseif (NOT MISSING_INTERNAL_XZ_LIBRARY) + set (USE_INTERNAL_XZ_LIBRARY 1) + set (XZ_LIBRARY liblzma) + set (XZ_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/xz/src/liblzma/api) +endif () + +message (STATUS "Using xz (lzma): ${XZ_INCLUDE_DIR} : ${XZ_LIBRARY}") diff --git a/cmake/find/yaml-cpp.cmake b/cmake/find/yaml-cpp.cmake new file mode 100644 index 00000000000..9b9d9bd39d6 --- /dev/null +++ b/cmake/find/yaml-cpp.cmake @@ -0,0 +1,9 @@ +option(USE_YAML_CPP "Enable yaml-cpp" ${ENABLE_LIBRARIES}) + +if (NOT USE_YAML_CPP) + return() +endif() + +if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/yaml-cpp") + message (ERROR "submodule contrib/yaml-cpp is missing. to fix try run: \n git submodule update --init --recursive") +endif() diff --git a/cmake/linux/default_libs.cmake b/cmake/linux/default_libs.cmake index d3a727e9cb8..c1e4d450389 100644 --- a/cmake/linux/default_libs.cmake +++ b/cmake/linux/default_libs.cmake @@ -6,7 +6,7 @@ set (DEFAULT_LIBS "-nodefaultlibs") # We need builtins from Clang's RT even without libcxx - for ubsan+int128. # See https://bugs.llvm.org/show_bug.cgi?id=16404 if (COMPILER_CLANG AND NOT (CMAKE_CROSSCOMPILING AND ARCH_AARCH64)) - execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-file-name=libclang_rt.builtins-${CMAKE_SYSTEM_PROCESSOR}.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE) + execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-libgcc-file-name --rtlib=compiler-rt OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE) else () set (BUILTINS_LIBRARY "-lgcc") endif () diff --git a/cmake/sanitize.cmake b/cmake/sanitize.cmake index 6c23ce8bc91..f60f7431389 100644 --- a/cmake/sanitize.cmake +++ b/cmake/sanitize.cmake @@ -40,7 +40,7 @@ if (SANITIZE) # RelWithDebInfo, and downgrade optimizations to -O1 but not to -Og, to # keep the binary size down. # TODO: try compiling with -Og and with ld.gold. 
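The default_libs.cmake hunk above stops reconstructing the compiler-rt builtins archive name from ${CMAKE_SYSTEM_PROCESSOR} and instead asks the compiler itself for the path, which keeps working when clang changes its directory layout or architecture suffix. In isolation the probe looks roughly like this (a sketch using only the flags shown in the diff; BUILTINS_LIBRARY is just an illustrative variable name):

    # Ask clang where its compiler-rt builtins archive lives.
    execute_process (
        COMMAND ${CMAKE_CXX_COMPILER} --print-libgcc-file-name --rtlib=compiler-rt
        OUTPUT_VARIABLE BUILTINS_LIBRARY
        OUTPUT_STRIP_TRAILING_WHITESPACE)
    message (STATUS "compiler-rt builtins: ${BUILTINS_LIBRARY}")
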
- set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/msan_suppressions.txt") + set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-use-after-dtor -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/msan_suppressions.txt") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}") diff --git a/cmake/target.cmake b/cmake/target.cmake index 7174ca3c2a9..d1a0b8f9cbf 100644 --- a/cmake/target.cmake +++ b/cmake/target.cmake @@ -12,6 +12,9 @@ elseif (CMAKE_SYSTEM_NAME MATCHES "FreeBSD") elseif (CMAKE_SYSTEM_NAME MATCHES "Darwin") set (OS_DARWIN 1) add_definitions(-D OS_DARWIN) +elseif (CMAKE_SYSTEM_NAME MATCHES "SunOS") + set (OS_SUNOS 1) + add_definitions(-D OS_SUNOS) endif () if (CMAKE_CROSSCOMPILING) diff --git a/cmake/tools.cmake b/cmake/tools.cmake index abb11843d59..8ff94ab867b 100644 --- a/cmake/tools.cmake +++ b/cmake/tools.cmake @@ -8,10 +8,13 @@ endif () if (COMPILER_GCC) # Require minimum version of gcc - set (GCC_MINIMUM_VERSION 9) + set (GCC_MINIMUM_VERSION 10) if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${GCC_MINIMUM_VERSION} AND NOT CMAKE_VERSION VERSION_LESS 2.8.9) message (FATAL_ERROR "GCC version must be at least ${GCC_MINIMUM_VERSION}. For example, if GCC ${GCC_MINIMUM_VERSION} is available under gcc-${GCC_MINIMUM_VERSION}, g++-${GCC_MINIMUM_VERSION} names, do the following: export CC=gcc-${GCC_MINIMUM_VERSION} CXX=g++-${GCC_MINIMUM_VERSION}; rm -rf CMakeCache.txt CMakeFiles; and re run cmake or ./release.") endif () + + message (WARNING "GCC compiler is not officially supported for ClickHouse. You should migrate to clang.") + elseif (COMPILER_CLANG) # Require minimum version of clang/apple-clang if (CMAKE_CXX_COMPILER_ID MATCHES "AppleClang") @@ -86,8 +89,3 @@ if (LINKER_NAME) message(STATUS "Using custom linker by name: ${LINKER_NAME}") endif () -if (ARCH_PPC64LE) - if (COMPILER_CLANG OR (COMPILER_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8)) - message(FATAL_ERROR "Only gcc-8 or higher is supported for powerpc architecture") - endif () -endif () diff --git a/cmake/warnings.cmake b/cmake/warnings.cmake index 8122e9ef31e..a85fe8963c7 100644 --- a/cmake/warnings.cmake +++ b/cmake/warnings.cmake @@ -11,11 +11,6 @@ if (NOT MSVC) set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wextra") endif () -if (USE_DEBUG_HELPERS) - set (INCLUDE_DEBUG_HELPERS "-I${ClickHouse_SOURCE_DIR}/base -include ${ClickHouse_SOURCE_DIR}/src/Core/iostream_debug_helpers.h") - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}") -endif () - # Add some warnings that are not available even with -Wall -Wextra -Wpedantic. # Intended for exploration of new compiler warnings that may be found useful. # Applies to clang only @@ -176,6 +171,7 @@ elseif (COMPILER_GCC) add_cxx_compile_options(-Wtrampolines) # Obvious add_cxx_compile_options(-Wunused) + add_cxx_compile_options(-Wundef) # Warn if vector operation is not implemented via SIMD capabilities of the architecture add_cxx_compile_options(-Wvector-operation-performance) # XXX: libstdc++ has some of these for 3way compare diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 6a56b4cc733..c499da9d087 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -1,4 +1,3 @@ -# Third-party libraries may have substandard code. 
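The tools.cmake hunk above raises the minimum supported GCC from 9 to 10 and adds an explicit warning that GCC is no longer an officially supported compiler for ClickHouse. Stripped of the repository's helper variables, the version gate is just a VERSION_LESS comparison, roughly as below (a simplified sketch, not the repository's exact code):

    set (GCC_MINIMUM_VERSION 10)
    if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU"
        AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${GCC_MINIMUM_VERSION})
        message (FATAL_ERROR "GCC version must be at least ${GCC_MINIMUM_VERSION}")
    endif ()
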
# Put all targets defined here and in added subfolders under "contrib/" folder in GUI-based IDEs by default. # Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they will @@ -11,8 +10,10 @@ else () endif () unset (_current_dir_name) -set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w") -set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w") +# Third-party libraries may have substandard code. +# Also remove a possible source of nondeterminism. +set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w -D__DATE__= -D__TIME__= -D__TIMESTAMP__=") +set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -D__DATE__= -D__TIME__= -D__TIMESTAMP__=") if (WITH_COVERAGE) set (WITHOUT_COVERAGE_LIST ${WITHOUT_COVERAGE}) @@ -47,7 +48,15 @@ add_subdirectory (lz4-cmake) add_subdirectory (murmurhash) add_subdirectory (replxx-cmake) add_subdirectory (unixodbc-cmake) -add_subdirectory (xz) +add_subdirectory (nanodbc-cmake) + +if (USE_YAML_CPP) + add_subdirectory (yaml-cpp-cmake) +endif() + +if (USE_INTERNAL_XZ_LIBRARY) + add_subdirectory (xz) +endif() add_subdirectory (poco-cmake) add_subdirectory (croaring-cmake) @@ -93,14 +102,8 @@ if (USE_INTERNAL_ZLIB_LIBRARY) add_subdirectory (${INTERNAL_ZLIB_NAME}) # We should use same defines when including zlib.h as used when zlib compiled target_compile_definitions (zlib PUBLIC ZLIB_COMPAT WITH_GZFILEOP) - if (TARGET zlibstatic) - target_compile_definitions (zlibstatic PUBLIC ZLIB_COMPAT WITH_GZFILEOP) - endif () if (ARCH_AMD64 OR ARCH_AARCH64) target_compile_definitions (zlib PUBLIC X86_64 UNALIGNED_OK) - if (TARGET zlibstatic) - target_compile_definitions (zlibstatic PUBLIC X86_64 UNALIGNED_OK) - endif () endif () endif () @@ -206,25 +209,26 @@ elseif(GTEST_SRC_DIR) target_compile_definitions(gtest INTERFACE GTEST_HAS_POSIX_RE=0) endif() -if (USE_EMBEDDED_COMPILER AND USE_INTERNAL_LLVM_LIBRARY) +if (USE_EMBEDDED_COMPILER) # ld: unknown option: --color-diagnostics if (APPLE) set (LINKER_SUPPORTS_COLOR_DIAGNOSTICS 0 CACHE INTERNAL "") endif () + set (LLVM_ENABLE_EH 1 CACHE INTERNAL "") set (LLVM_ENABLE_RTTI 1 CACHE INTERNAL "") set (LLVM_ENABLE_PIC 0 CACHE INTERNAL "") set (LLVM_TARGETS_TO_BUILD "X86;AArch64" CACHE STRING "") - # Yes it is set globally, but this is not enough, since llvm will add -std=c++11 after default - # And c++2a cannot be used, due to ambiguous operator != - if (COMPILER_GCC OR COMPILER_CLANG) - set (_CXX_STANDARD "gnu++17") - else() - set (_CXX_STANDARD "c++17") - endif() - set (LLVM_CXX_STD ${_CXX_STANDARD} CACHE STRING "" FORCE) + + # Need to use C++17 since the compilation is not possible with C++20 currently, due to ambiguous operator != etc. + # LLVM project will set its default value for the -std=... but our global setting from CMake will override it. + set (CMAKE_CXX_STANDARD_bak ${CMAKE_CXX_STANDARD}) + set (CMAKE_CXX_STANDARD 17) + add_subdirectory (llvm/llvm) - target_include_directories(LLVMSupport SYSTEM BEFORE PRIVATE ${ZLIB_INCLUDE_DIR}) + + set (CMAKE_CXX_STANDARD ${CMAKE_CXX_STANDARD_bak}) + unset (CMAKE_CXX_STANDARD_bak) endif () if (USE_INTERNAL_LIBGSASL_LIBRARY) @@ -280,7 +284,14 @@ if (USE_AMQPCPP) add_subdirectory (amqpcpp-cmake) endif() if (USE_CASSANDRA) + # Need to use C++17 since the compilation is not possible with C++20 currently. 
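Both the LLVM block above and the Cassandra block that follows pin the language standard for a single subproject by saving the global CMAKE_CXX_STANDARD, forcing C++17 (these projects do not yet build as C++20, e.g. due to the ambiguous operator != noted in the comments), and restoring the saved value afterwards. The pattern in isolation (a sketch; some_cxx17_only_project is a placeholder directory name):

    set (CMAKE_CXX_STANDARD_bak ${CMAKE_CXX_STANDARD})
    set (CMAKE_CXX_STANDARD 17)
    add_subdirectory (some_cxx17_only_project)  # placeholder
    set (CMAKE_CXX_STANDARD ${CMAKE_CXX_STANDARD_bak})
    unset (CMAKE_CXX_STANDARD_bak)
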
+ set (CMAKE_CXX_STANDARD_bak ${CMAKE_CXX_STANDARD}) + set (CMAKE_CXX_STANDARD 17) + add_subdirectory (cassandra) + + set (CMAKE_CXX_STANDARD ${CMAKE_CXX_STANDARD_bak}) + unset (CMAKE_CXX_STANDARD_bak) endif() # Should go before: diff --git a/contrib/NuRaft b/contrib/NuRaft index 70468326ad5..95d6bbba579 160000 --- a/contrib/NuRaft +++ b/contrib/NuRaft @@ -1 +1 @@ -Subproject commit 70468326ad5d72e9497944838484c591dae054ea +Subproject commit 95d6bbba579b3a4e4c2dede954f541ff6f3dba51 diff --git a/contrib/amqpcpp-cmake/CMakeLists.txt b/contrib/amqpcpp-cmake/CMakeLists.txt index 4853983680e..4e8342af125 100644 --- a/contrib/amqpcpp-cmake/CMakeLists.txt +++ b/contrib/amqpcpp-cmake/CMakeLists.txt @@ -1,25 +1,25 @@ -set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP) +set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP") set (SRCS - ${LIBRARY_DIR}/src/array.cpp - ${LIBRARY_DIR}/src/channel.cpp - ${LIBRARY_DIR}/src/channelimpl.cpp - ${LIBRARY_DIR}/src/connectionimpl.cpp - ${LIBRARY_DIR}/src/deferredcancel.cpp - ${LIBRARY_DIR}/src/deferredconfirm.cpp - ${LIBRARY_DIR}/src/deferredconsumer.cpp - ${LIBRARY_DIR}/src/deferredextreceiver.cpp - ${LIBRARY_DIR}/src/deferredget.cpp - ${LIBRARY_DIR}/src/deferredpublisher.cpp - ${LIBRARY_DIR}/src/deferredreceiver.cpp - ${LIBRARY_DIR}/src/field.cpp - ${LIBRARY_DIR}/src/flags.cpp - ${LIBRARY_DIR}/src/linux_tcp/openssl.cpp - ${LIBRARY_DIR}/src/linux_tcp/tcpconnection.cpp - ${LIBRARY_DIR}/src/inbuffer.cpp - ${LIBRARY_DIR}/src/receivedframe.cpp - ${LIBRARY_DIR}/src/table.cpp - ${LIBRARY_DIR}/src/watchable.cpp + "${LIBRARY_DIR}/src/array.cpp" + "${LIBRARY_DIR}/src/channel.cpp" + "${LIBRARY_DIR}/src/channelimpl.cpp" + "${LIBRARY_DIR}/src/connectionimpl.cpp" + "${LIBRARY_DIR}/src/deferredcancel.cpp" + "${LIBRARY_DIR}/src/deferredconfirm.cpp" + "${LIBRARY_DIR}/src/deferredconsumer.cpp" + "${LIBRARY_DIR}/src/deferredextreceiver.cpp" + "${LIBRARY_DIR}/src/deferredget.cpp" + "${LIBRARY_DIR}/src/deferredpublisher.cpp" + "${LIBRARY_DIR}/src/deferredreceiver.cpp" + "${LIBRARY_DIR}/src/field.cpp" + "${LIBRARY_DIR}/src/flags.cpp" + "${LIBRARY_DIR}/src/linux_tcp/openssl.cpp" + "${LIBRARY_DIR}/src/linux_tcp/tcpconnection.cpp" + "${LIBRARY_DIR}/src/inbuffer.cpp" + "${LIBRARY_DIR}/src/receivedframe.cpp" + "${LIBRARY_DIR}/src/table.cpp" + "${LIBRARY_DIR}/src/watchable.cpp" ) add_library(amqp-cpp ${SRCS}) @@ -39,7 +39,7 @@ target_compile_options (amqp-cpp -w ) -target_include_directories (amqp-cpp SYSTEM PUBLIC ${LIBRARY_DIR}/include) +target_include_directories (amqp-cpp SYSTEM PUBLIC "${LIBRARY_DIR}/include") target_link_libraries (amqp-cpp PUBLIC ssl) diff --git a/contrib/antlr4-runtime b/contrib/antlr4-runtime index a2fa7b76e2e..672643e9a42 160000 --- a/contrib/antlr4-runtime +++ b/contrib/antlr4-runtime @@ -1 +1 @@ -Subproject commit a2fa7b76e2ee16d2ad955e9214a90bbf79da66fc +Subproject commit 672643e9a427ef803abf13bc8cb4989606553d64 diff --git a/contrib/antlr4-runtime-cmake/CMakeLists.txt b/contrib/antlr4-runtime-cmake/CMakeLists.txt index 5baefdb1e29..4f639a33ebf 100644 --- a/contrib/antlr4-runtime-cmake/CMakeLists.txt +++ b/contrib/antlr4-runtime-cmake/CMakeLists.txt @@ -1,154 +1,154 @@ -set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/antlr4-runtime) +set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/antlr4-runtime") set (SRCS - ${LIBRARY_DIR}/ANTLRErrorListener.cpp - ${LIBRARY_DIR}/ANTLRErrorStrategy.cpp - ${LIBRARY_DIR}/ANTLRFileStream.cpp - ${LIBRARY_DIR}/ANTLRInputStream.cpp - ${LIBRARY_DIR}/atn/AbstractPredicateTransition.cpp - 
${LIBRARY_DIR}/atn/ActionTransition.cpp - ${LIBRARY_DIR}/atn/AmbiguityInfo.cpp - ${LIBRARY_DIR}/atn/ArrayPredictionContext.cpp - ${LIBRARY_DIR}/atn/ATN.cpp - ${LIBRARY_DIR}/atn/ATNConfig.cpp - ${LIBRARY_DIR}/atn/ATNConfigSet.cpp - ${LIBRARY_DIR}/atn/ATNDeserializationOptions.cpp - ${LIBRARY_DIR}/atn/ATNDeserializer.cpp - ${LIBRARY_DIR}/atn/ATNSerializer.cpp - ${LIBRARY_DIR}/atn/ATNSimulator.cpp - ${LIBRARY_DIR}/atn/ATNState.cpp - ${LIBRARY_DIR}/atn/AtomTransition.cpp - ${LIBRARY_DIR}/atn/BasicBlockStartState.cpp - ${LIBRARY_DIR}/atn/BasicState.cpp - ${LIBRARY_DIR}/atn/BlockEndState.cpp - ${LIBRARY_DIR}/atn/BlockStartState.cpp - ${LIBRARY_DIR}/atn/ContextSensitivityInfo.cpp - ${LIBRARY_DIR}/atn/DecisionEventInfo.cpp - ${LIBRARY_DIR}/atn/DecisionInfo.cpp - ${LIBRARY_DIR}/atn/DecisionState.cpp - ${LIBRARY_DIR}/atn/EmptyPredictionContext.cpp - ${LIBRARY_DIR}/atn/EpsilonTransition.cpp - ${LIBRARY_DIR}/atn/ErrorInfo.cpp - ${LIBRARY_DIR}/atn/LexerAction.cpp - ${LIBRARY_DIR}/atn/LexerActionExecutor.cpp - ${LIBRARY_DIR}/atn/LexerATNConfig.cpp - ${LIBRARY_DIR}/atn/LexerATNSimulator.cpp - ${LIBRARY_DIR}/atn/LexerChannelAction.cpp - ${LIBRARY_DIR}/atn/LexerCustomAction.cpp - ${LIBRARY_DIR}/atn/LexerIndexedCustomAction.cpp - ${LIBRARY_DIR}/atn/LexerModeAction.cpp - ${LIBRARY_DIR}/atn/LexerMoreAction.cpp - ${LIBRARY_DIR}/atn/LexerPopModeAction.cpp - ${LIBRARY_DIR}/atn/LexerPushModeAction.cpp - ${LIBRARY_DIR}/atn/LexerSkipAction.cpp - ${LIBRARY_DIR}/atn/LexerTypeAction.cpp - ${LIBRARY_DIR}/atn/LL1Analyzer.cpp - ${LIBRARY_DIR}/atn/LookaheadEventInfo.cpp - ${LIBRARY_DIR}/atn/LoopEndState.cpp - ${LIBRARY_DIR}/atn/NotSetTransition.cpp - ${LIBRARY_DIR}/atn/OrderedATNConfigSet.cpp - ${LIBRARY_DIR}/atn/ParseInfo.cpp - ${LIBRARY_DIR}/atn/ParserATNSimulator.cpp - ${LIBRARY_DIR}/atn/PlusBlockStartState.cpp - ${LIBRARY_DIR}/atn/PlusLoopbackState.cpp - ${LIBRARY_DIR}/atn/PrecedencePredicateTransition.cpp - ${LIBRARY_DIR}/atn/PredicateEvalInfo.cpp - ${LIBRARY_DIR}/atn/PredicateTransition.cpp - ${LIBRARY_DIR}/atn/PredictionContext.cpp - ${LIBRARY_DIR}/atn/PredictionMode.cpp - ${LIBRARY_DIR}/atn/ProfilingATNSimulator.cpp - ${LIBRARY_DIR}/atn/RangeTransition.cpp - ${LIBRARY_DIR}/atn/RuleStartState.cpp - ${LIBRARY_DIR}/atn/RuleStopState.cpp - ${LIBRARY_DIR}/atn/RuleTransition.cpp - ${LIBRARY_DIR}/atn/SemanticContext.cpp - ${LIBRARY_DIR}/atn/SetTransition.cpp - ${LIBRARY_DIR}/atn/SingletonPredictionContext.cpp - ${LIBRARY_DIR}/atn/StarBlockStartState.cpp - ${LIBRARY_DIR}/atn/StarLoopbackState.cpp - ${LIBRARY_DIR}/atn/StarLoopEntryState.cpp - ${LIBRARY_DIR}/atn/TokensStartState.cpp - ${LIBRARY_DIR}/atn/Transition.cpp - ${LIBRARY_DIR}/atn/WildcardTransition.cpp - ${LIBRARY_DIR}/BailErrorStrategy.cpp - ${LIBRARY_DIR}/BaseErrorListener.cpp - ${LIBRARY_DIR}/BufferedTokenStream.cpp - ${LIBRARY_DIR}/CharStream.cpp - ${LIBRARY_DIR}/CommonToken.cpp - ${LIBRARY_DIR}/CommonTokenFactory.cpp - ${LIBRARY_DIR}/CommonTokenStream.cpp - ${LIBRARY_DIR}/ConsoleErrorListener.cpp - ${LIBRARY_DIR}/DefaultErrorStrategy.cpp - ${LIBRARY_DIR}/dfa/DFA.cpp - ${LIBRARY_DIR}/dfa/DFASerializer.cpp - ${LIBRARY_DIR}/dfa/DFAState.cpp - ${LIBRARY_DIR}/dfa/LexerDFASerializer.cpp - ${LIBRARY_DIR}/DiagnosticErrorListener.cpp - ${LIBRARY_DIR}/Exceptions.cpp - ${LIBRARY_DIR}/FailedPredicateException.cpp - ${LIBRARY_DIR}/InputMismatchException.cpp - ${LIBRARY_DIR}/InterpreterRuleContext.cpp - ${LIBRARY_DIR}/IntStream.cpp - ${LIBRARY_DIR}/Lexer.cpp - ${LIBRARY_DIR}/LexerInterpreter.cpp - ${LIBRARY_DIR}/LexerNoViableAltException.cpp - 
${LIBRARY_DIR}/ListTokenSource.cpp - ${LIBRARY_DIR}/misc/InterpreterDataReader.cpp - ${LIBRARY_DIR}/misc/Interval.cpp - ${LIBRARY_DIR}/misc/IntervalSet.cpp - ${LIBRARY_DIR}/misc/MurmurHash.cpp - ${LIBRARY_DIR}/misc/Predicate.cpp - ${LIBRARY_DIR}/NoViableAltException.cpp - ${LIBRARY_DIR}/Parser.cpp - ${LIBRARY_DIR}/ParserInterpreter.cpp - ${LIBRARY_DIR}/ParserRuleContext.cpp - ${LIBRARY_DIR}/ProxyErrorListener.cpp - ${LIBRARY_DIR}/RecognitionException.cpp - ${LIBRARY_DIR}/Recognizer.cpp - ${LIBRARY_DIR}/RuleContext.cpp - ${LIBRARY_DIR}/RuleContextWithAltNum.cpp - ${LIBRARY_DIR}/RuntimeMetaData.cpp - ${LIBRARY_DIR}/support/Any.cpp - ${LIBRARY_DIR}/support/Arrays.cpp - ${LIBRARY_DIR}/support/CPPUtils.cpp - ${LIBRARY_DIR}/support/guid.cpp - ${LIBRARY_DIR}/support/StringUtils.cpp - ${LIBRARY_DIR}/Token.cpp - ${LIBRARY_DIR}/TokenSource.cpp - ${LIBRARY_DIR}/TokenStream.cpp - ${LIBRARY_DIR}/TokenStreamRewriter.cpp - ${LIBRARY_DIR}/tree/ErrorNode.cpp - ${LIBRARY_DIR}/tree/ErrorNodeImpl.cpp - ${LIBRARY_DIR}/tree/IterativeParseTreeWalker.cpp - ${LIBRARY_DIR}/tree/ParseTree.cpp - ${LIBRARY_DIR}/tree/ParseTreeListener.cpp - ${LIBRARY_DIR}/tree/ParseTreeVisitor.cpp - ${LIBRARY_DIR}/tree/ParseTreeWalker.cpp - ${LIBRARY_DIR}/tree/pattern/Chunk.cpp - ${LIBRARY_DIR}/tree/pattern/ParseTreeMatch.cpp - ${LIBRARY_DIR}/tree/pattern/ParseTreePattern.cpp - ${LIBRARY_DIR}/tree/pattern/ParseTreePatternMatcher.cpp - ${LIBRARY_DIR}/tree/pattern/RuleTagToken.cpp - ${LIBRARY_DIR}/tree/pattern/TagChunk.cpp - ${LIBRARY_DIR}/tree/pattern/TextChunk.cpp - ${LIBRARY_DIR}/tree/pattern/TokenTagToken.cpp - ${LIBRARY_DIR}/tree/TerminalNode.cpp - ${LIBRARY_DIR}/tree/TerminalNodeImpl.cpp - ${LIBRARY_DIR}/tree/Trees.cpp - ${LIBRARY_DIR}/tree/xpath/XPath.cpp - ${LIBRARY_DIR}/tree/xpath/XPathElement.cpp - ${LIBRARY_DIR}/tree/xpath/XPathLexer.cpp - ${LIBRARY_DIR}/tree/xpath/XPathLexerErrorListener.cpp - ${LIBRARY_DIR}/tree/xpath/XPathRuleAnywhereElement.cpp - ${LIBRARY_DIR}/tree/xpath/XPathRuleElement.cpp - ${LIBRARY_DIR}/tree/xpath/XPathTokenAnywhereElement.cpp - ${LIBRARY_DIR}/tree/xpath/XPathTokenElement.cpp - ${LIBRARY_DIR}/tree/xpath/XPathWildcardAnywhereElement.cpp - ${LIBRARY_DIR}/tree/xpath/XPathWildcardElement.cpp - ${LIBRARY_DIR}/UnbufferedCharStream.cpp - ${LIBRARY_DIR}/UnbufferedTokenStream.cpp - ${LIBRARY_DIR}/Vocabulary.cpp - ${LIBRARY_DIR}/WritableToken.cpp + "${LIBRARY_DIR}/ANTLRErrorListener.cpp" + "${LIBRARY_DIR}/ANTLRErrorStrategy.cpp" + "${LIBRARY_DIR}/ANTLRFileStream.cpp" + "${LIBRARY_DIR}/ANTLRInputStream.cpp" + "${LIBRARY_DIR}/atn/AbstractPredicateTransition.cpp" + "${LIBRARY_DIR}/atn/ActionTransition.cpp" + "${LIBRARY_DIR}/atn/AmbiguityInfo.cpp" + "${LIBRARY_DIR}/atn/ArrayPredictionContext.cpp" + "${LIBRARY_DIR}/atn/ATN.cpp" + "${LIBRARY_DIR}/atn/ATNConfig.cpp" + "${LIBRARY_DIR}/atn/ATNConfigSet.cpp" + "${LIBRARY_DIR}/atn/ATNDeserializationOptions.cpp" + "${LIBRARY_DIR}/atn/ATNDeserializer.cpp" + "${LIBRARY_DIR}/atn/ATNSerializer.cpp" + "${LIBRARY_DIR}/atn/ATNSimulator.cpp" + "${LIBRARY_DIR}/atn/ATNState.cpp" + "${LIBRARY_DIR}/atn/AtomTransition.cpp" + "${LIBRARY_DIR}/atn/BasicBlockStartState.cpp" + "${LIBRARY_DIR}/atn/BasicState.cpp" + "${LIBRARY_DIR}/atn/BlockEndState.cpp" + "${LIBRARY_DIR}/atn/BlockStartState.cpp" + "${LIBRARY_DIR}/atn/ContextSensitivityInfo.cpp" + "${LIBRARY_DIR}/atn/DecisionEventInfo.cpp" + "${LIBRARY_DIR}/atn/DecisionInfo.cpp" + "${LIBRARY_DIR}/atn/DecisionState.cpp" + "${LIBRARY_DIR}/atn/EmptyPredictionContext.cpp" + "${LIBRARY_DIR}/atn/EpsilonTransition.cpp" + 
"${LIBRARY_DIR}/atn/ErrorInfo.cpp" + "${LIBRARY_DIR}/atn/LexerAction.cpp" + "${LIBRARY_DIR}/atn/LexerActionExecutor.cpp" + "${LIBRARY_DIR}/atn/LexerATNConfig.cpp" + "${LIBRARY_DIR}/atn/LexerATNSimulator.cpp" + "${LIBRARY_DIR}/atn/LexerChannelAction.cpp" + "${LIBRARY_DIR}/atn/LexerCustomAction.cpp" + "${LIBRARY_DIR}/atn/LexerIndexedCustomAction.cpp" + "${LIBRARY_DIR}/atn/LexerModeAction.cpp" + "${LIBRARY_DIR}/atn/LexerMoreAction.cpp" + "${LIBRARY_DIR}/atn/LexerPopModeAction.cpp" + "${LIBRARY_DIR}/atn/LexerPushModeAction.cpp" + "${LIBRARY_DIR}/atn/LexerSkipAction.cpp" + "${LIBRARY_DIR}/atn/LexerTypeAction.cpp" + "${LIBRARY_DIR}/atn/LL1Analyzer.cpp" + "${LIBRARY_DIR}/atn/LookaheadEventInfo.cpp" + "${LIBRARY_DIR}/atn/LoopEndState.cpp" + "${LIBRARY_DIR}/atn/NotSetTransition.cpp" + "${LIBRARY_DIR}/atn/OrderedATNConfigSet.cpp" + "${LIBRARY_DIR}/atn/ParseInfo.cpp" + "${LIBRARY_DIR}/atn/ParserATNSimulator.cpp" + "${LIBRARY_DIR}/atn/PlusBlockStartState.cpp" + "${LIBRARY_DIR}/atn/PlusLoopbackState.cpp" + "${LIBRARY_DIR}/atn/PrecedencePredicateTransition.cpp" + "${LIBRARY_DIR}/atn/PredicateEvalInfo.cpp" + "${LIBRARY_DIR}/atn/PredicateTransition.cpp" + "${LIBRARY_DIR}/atn/PredictionContext.cpp" + "${LIBRARY_DIR}/atn/PredictionMode.cpp" + "${LIBRARY_DIR}/atn/ProfilingATNSimulator.cpp" + "${LIBRARY_DIR}/atn/RangeTransition.cpp" + "${LIBRARY_DIR}/atn/RuleStartState.cpp" + "${LIBRARY_DIR}/atn/RuleStopState.cpp" + "${LIBRARY_DIR}/atn/RuleTransition.cpp" + "${LIBRARY_DIR}/atn/SemanticContext.cpp" + "${LIBRARY_DIR}/atn/SetTransition.cpp" + "${LIBRARY_DIR}/atn/SingletonPredictionContext.cpp" + "${LIBRARY_DIR}/atn/StarBlockStartState.cpp" + "${LIBRARY_DIR}/atn/StarLoopbackState.cpp" + "${LIBRARY_DIR}/atn/StarLoopEntryState.cpp" + "${LIBRARY_DIR}/atn/TokensStartState.cpp" + "${LIBRARY_DIR}/atn/Transition.cpp" + "${LIBRARY_DIR}/atn/WildcardTransition.cpp" + "${LIBRARY_DIR}/BailErrorStrategy.cpp" + "${LIBRARY_DIR}/BaseErrorListener.cpp" + "${LIBRARY_DIR}/BufferedTokenStream.cpp" + "${LIBRARY_DIR}/CharStream.cpp" + "${LIBRARY_DIR}/CommonToken.cpp" + "${LIBRARY_DIR}/CommonTokenFactory.cpp" + "${LIBRARY_DIR}/CommonTokenStream.cpp" + "${LIBRARY_DIR}/ConsoleErrorListener.cpp" + "${LIBRARY_DIR}/DefaultErrorStrategy.cpp" + "${LIBRARY_DIR}/dfa/DFA.cpp" + "${LIBRARY_DIR}/dfa/DFASerializer.cpp" + "${LIBRARY_DIR}/dfa/DFAState.cpp" + "${LIBRARY_DIR}/dfa/LexerDFASerializer.cpp" + "${LIBRARY_DIR}/DiagnosticErrorListener.cpp" + "${LIBRARY_DIR}/Exceptions.cpp" + "${LIBRARY_DIR}/FailedPredicateException.cpp" + "${LIBRARY_DIR}/InputMismatchException.cpp" + "${LIBRARY_DIR}/InterpreterRuleContext.cpp" + "${LIBRARY_DIR}/IntStream.cpp" + "${LIBRARY_DIR}/Lexer.cpp" + "${LIBRARY_DIR}/LexerInterpreter.cpp" + "${LIBRARY_DIR}/LexerNoViableAltException.cpp" + "${LIBRARY_DIR}/ListTokenSource.cpp" + "${LIBRARY_DIR}/misc/InterpreterDataReader.cpp" + "${LIBRARY_DIR}/misc/Interval.cpp" + "${LIBRARY_DIR}/misc/IntervalSet.cpp" + "${LIBRARY_DIR}/misc/MurmurHash.cpp" + "${LIBRARY_DIR}/misc/Predicate.cpp" + "${LIBRARY_DIR}/NoViableAltException.cpp" + "${LIBRARY_DIR}/Parser.cpp" + "${LIBRARY_DIR}/ParserInterpreter.cpp" + "${LIBRARY_DIR}/ParserRuleContext.cpp" + "${LIBRARY_DIR}/ProxyErrorListener.cpp" + "${LIBRARY_DIR}/RecognitionException.cpp" + "${LIBRARY_DIR}/Recognizer.cpp" + "${LIBRARY_DIR}/RuleContext.cpp" + "${LIBRARY_DIR}/RuleContextWithAltNum.cpp" + "${LIBRARY_DIR}/RuntimeMetaData.cpp" + "${LIBRARY_DIR}/support/Any.cpp" + "${LIBRARY_DIR}/support/Arrays.cpp" + "${LIBRARY_DIR}/support/CPPUtils.cpp" + "${LIBRARY_DIR}/support/guid.cpp" + 
"${LIBRARY_DIR}/support/StringUtils.cpp" + "${LIBRARY_DIR}/Token.cpp" + "${LIBRARY_DIR}/TokenSource.cpp" + "${LIBRARY_DIR}/TokenStream.cpp" + "${LIBRARY_DIR}/TokenStreamRewriter.cpp" + "${LIBRARY_DIR}/tree/ErrorNode.cpp" + "${LIBRARY_DIR}/tree/ErrorNodeImpl.cpp" + "${LIBRARY_DIR}/tree/IterativeParseTreeWalker.cpp" + "${LIBRARY_DIR}/tree/ParseTree.cpp" + "${LIBRARY_DIR}/tree/ParseTreeListener.cpp" + "${LIBRARY_DIR}/tree/ParseTreeVisitor.cpp" + "${LIBRARY_DIR}/tree/ParseTreeWalker.cpp" + "${LIBRARY_DIR}/tree/pattern/Chunk.cpp" + "${LIBRARY_DIR}/tree/pattern/ParseTreeMatch.cpp" + "${LIBRARY_DIR}/tree/pattern/ParseTreePattern.cpp" + "${LIBRARY_DIR}/tree/pattern/ParseTreePatternMatcher.cpp" + "${LIBRARY_DIR}/tree/pattern/RuleTagToken.cpp" + "${LIBRARY_DIR}/tree/pattern/TagChunk.cpp" + "${LIBRARY_DIR}/tree/pattern/TextChunk.cpp" + "${LIBRARY_DIR}/tree/pattern/TokenTagToken.cpp" + "${LIBRARY_DIR}/tree/TerminalNode.cpp" + "${LIBRARY_DIR}/tree/TerminalNodeImpl.cpp" + "${LIBRARY_DIR}/tree/Trees.cpp" + "${LIBRARY_DIR}/tree/xpath/XPath.cpp" + "${LIBRARY_DIR}/tree/xpath/XPathElement.cpp" + "${LIBRARY_DIR}/tree/xpath/XPathLexer.cpp" + "${LIBRARY_DIR}/tree/xpath/XPathLexerErrorListener.cpp" + "${LIBRARY_DIR}/tree/xpath/XPathRuleAnywhereElement.cpp" + "${LIBRARY_DIR}/tree/xpath/XPathRuleElement.cpp" + "${LIBRARY_DIR}/tree/xpath/XPathTokenAnywhereElement.cpp" + "${LIBRARY_DIR}/tree/xpath/XPathTokenElement.cpp" + "${LIBRARY_DIR}/tree/xpath/XPathWildcardAnywhereElement.cpp" + "${LIBRARY_DIR}/tree/xpath/XPathWildcardElement.cpp" + "${LIBRARY_DIR}/UnbufferedCharStream.cpp" + "${LIBRARY_DIR}/UnbufferedTokenStream.cpp" + "${LIBRARY_DIR}/Vocabulary.cpp" + "${LIBRARY_DIR}/WritableToken.cpp" ) add_library (antlr4-runtime ${SRCS}) diff --git a/contrib/arrow b/contrib/arrow index 744bdfe188f..616b3dc76a0 160000 --- a/contrib/arrow +++ b/contrib/arrow @@ -1 +1 @@ -Subproject commit 744bdfe188f018e5e05f5deebd4e9ee0a7706cf4 +Subproject commit 616b3dc76a0c8450b4027ded8a78e9619d7c845f diff --git a/contrib/arrow-cmake/CMakeLists.txt b/contrib/arrow-cmake/CMakeLists.txt index 4b402a9db79..deefb244beb 100644 --- a/contrib/arrow-cmake/CMakeLists.txt +++ b/contrib/arrow-cmake/CMakeLists.txt @@ -2,69 +2,69 @@ set (CMAKE_CXX_STANDARD 17) # === thrift -set(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/thrift/lib/cpp) +set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/thrift/lib/cpp") # contrib/thrift/lib/cpp/CMakeLists.txt set(thriftcpp_SOURCES - ${LIBRARY_DIR}/src/thrift/TApplicationException.cpp - ${LIBRARY_DIR}/src/thrift/TOutput.cpp - ${LIBRARY_DIR}/src/thrift/async/TAsyncChannel.cpp - ${LIBRARY_DIR}/src/thrift/async/TAsyncProtocolProcessor.cpp - ${LIBRARY_DIR}/src/thrift/async/TConcurrentClientSyncInfo.h - ${LIBRARY_DIR}/src/thrift/async/TConcurrentClientSyncInfo.cpp - ${LIBRARY_DIR}/src/thrift/concurrency/ThreadManager.cpp - ${LIBRARY_DIR}/src/thrift/concurrency/TimerManager.cpp - ${LIBRARY_DIR}/src/thrift/concurrency/Util.cpp - ${LIBRARY_DIR}/src/thrift/processor/PeekProcessor.cpp - ${LIBRARY_DIR}/src/thrift/protocol/TBase64Utils.cpp - ${LIBRARY_DIR}/src/thrift/protocol/TDebugProtocol.cpp - ${LIBRARY_DIR}/src/thrift/protocol/TJSONProtocol.cpp - ${LIBRARY_DIR}/src/thrift/protocol/TMultiplexedProtocol.cpp - ${LIBRARY_DIR}/src/thrift/protocol/TProtocol.cpp - ${LIBRARY_DIR}/src/thrift/transport/TTransportException.cpp - ${LIBRARY_DIR}/src/thrift/transport/TFDTransport.cpp - ${LIBRARY_DIR}/src/thrift/transport/TSimpleFileTransport.cpp - ${LIBRARY_DIR}/src/thrift/transport/THttpTransport.cpp - 
${LIBRARY_DIR}/src/thrift/transport/THttpClient.cpp - ${LIBRARY_DIR}/src/thrift/transport/THttpServer.cpp - ${LIBRARY_DIR}/src/thrift/transport/TSocket.cpp - ${LIBRARY_DIR}/src/thrift/transport/TSocketPool.cpp - ${LIBRARY_DIR}/src/thrift/transport/TServerSocket.cpp - ${LIBRARY_DIR}/src/thrift/transport/TTransportUtils.cpp - ${LIBRARY_DIR}/src/thrift/transport/TBufferTransports.cpp - ${LIBRARY_DIR}/src/thrift/server/TConnectedClient.cpp - ${LIBRARY_DIR}/src/thrift/server/TServerFramework.cpp - ${LIBRARY_DIR}/src/thrift/server/TSimpleServer.cpp - ${LIBRARY_DIR}/src/thrift/server/TThreadPoolServer.cpp - ${LIBRARY_DIR}/src/thrift/server/TThreadedServer.cpp + "${LIBRARY_DIR}/src/thrift/TApplicationException.cpp" + "${LIBRARY_DIR}/src/thrift/TOutput.cpp" + "${LIBRARY_DIR}/src/thrift/async/TAsyncChannel.cpp" + "${LIBRARY_DIR}/src/thrift/async/TAsyncProtocolProcessor.cpp" + "${LIBRARY_DIR}/src/thrift/async/TConcurrentClientSyncInfo.h" + "${LIBRARY_DIR}/src/thrift/async/TConcurrentClientSyncInfo.cpp" + "${LIBRARY_DIR}/src/thrift/concurrency/ThreadManager.cpp" + "${LIBRARY_DIR}/src/thrift/concurrency/TimerManager.cpp" + "${LIBRARY_DIR}/src/thrift/concurrency/Util.cpp" + "${LIBRARY_DIR}/src/thrift/processor/PeekProcessor.cpp" + "${LIBRARY_DIR}/src/thrift/protocol/TBase64Utils.cpp" + "${LIBRARY_DIR}/src/thrift/protocol/TDebugProtocol.cpp" + "${LIBRARY_DIR}/src/thrift/protocol/TJSONProtocol.cpp" + "${LIBRARY_DIR}/src/thrift/protocol/TMultiplexedProtocol.cpp" + "${LIBRARY_DIR}/src/thrift/protocol/TProtocol.cpp" + "${LIBRARY_DIR}/src/thrift/transport/TTransportException.cpp" + "${LIBRARY_DIR}/src/thrift/transport/TFDTransport.cpp" + "${LIBRARY_DIR}/src/thrift/transport/TSimpleFileTransport.cpp" + "${LIBRARY_DIR}/src/thrift/transport/THttpTransport.cpp" + "${LIBRARY_DIR}/src/thrift/transport/THttpClient.cpp" + "${LIBRARY_DIR}/src/thrift/transport/THttpServer.cpp" + "${LIBRARY_DIR}/src/thrift/transport/TSocket.cpp" + "${LIBRARY_DIR}/src/thrift/transport/TSocketPool.cpp" + "${LIBRARY_DIR}/src/thrift/transport/TServerSocket.cpp" + "${LIBRARY_DIR}/src/thrift/transport/TTransportUtils.cpp" + "${LIBRARY_DIR}/src/thrift/transport/TBufferTransports.cpp" + "${LIBRARY_DIR}/src/thrift/server/TConnectedClient.cpp" + "${LIBRARY_DIR}/src/thrift/server/TServerFramework.cpp" + "${LIBRARY_DIR}/src/thrift/server/TSimpleServer.cpp" + "${LIBRARY_DIR}/src/thrift/server/TThreadPoolServer.cpp" + "${LIBRARY_DIR}/src/thrift/server/TThreadedServer.cpp" ) set(thriftcpp_threads_SOURCES - ${LIBRARY_DIR}/src/thrift/concurrency/ThreadFactory.cpp - ${LIBRARY_DIR}/src/thrift/concurrency/Thread.cpp - ${LIBRARY_DIR}/src/thrift/concurrency/Monitor.cpp - ${LIBRARY_DIR}/src/thrift/concurrency/Mutex.cpp + "${LIBRARY_DIR}/src/thrift/concurrency/ThreadFactory.cpp" + "${LIBRARY_DIR}/src/thrift/concurrency/Thread.cpp" + "${LIBRARY_DIR}/src/thrift/concurrency/Monitor.cpp" + "${LIBRARY_DIR}/src/thrift/concurrency/Mutex.cpp" ) add_library(${THRIFT_LIBRARY} ${thriftcpp_SOURCES} ${thriftcpp_threads_SOURCES}) set_target_properties(${THRIFT_LIBRARY} PROPERTIES CXX_STANDARD 14) # REMOVE after https://github.com/apache/thrift/pull/1641 -target_include_directories(${THRIFT_LIBRARY} SYSTEM PUBLIC ${ClickHouse_SOURCE_DIR}/contrib/thrift/lib/cpp/src) +target_include_directories(${THRIFT_LIBRARY} SYSTEM PUBLIC "${ClickHouse_SOURCE_DIR}/contrib/thrift/lib/cpp/src") target_link_libraries (${THRIFT_LIBRARY} PRIVATE boost::headers_only) # === orc -set(ORC_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/orc/c++) -set(ORC_INCLUDE_DIR ${ORC_SOURCE_DIR}/include) 
-set(ORC_SOURCE_SRC_DIR ${ORC_SOURCE_DIR}/src) -set(ORC_SOURCE_WRAP_DIR ${ORC_SOURCE_DIR}/wrap) +set(ORC_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/orc/c++") +set(ORC_INCLUDE_DIR "${ORC_SOURCE_DIR}/include") +set(ORC_SOURCE_SRC_DIR "${ORC_SOURCE_DIR}/src") +set(ORC_SOURCE_WRAP_DIR "${ORC_SOURCE_DIR}/wrap") -set(ORC_BUILD_SRC_DIR ${CMAKE_CURRENT_BINARY_DIR}/../orc/c++/src) -set(ORC_BUILD_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR}/../orc/c++/include) +set(ORC_BUILD_SRC_DIR "${CMAKE_CURRENT_BINARY_DIR}/../orc/c++/src") +set(ORC_BUILD_INCLUDE_DIR "${CMAKE_CURRENT_BINARY_DIR}/../orc/c++/include") -set(GOOGLE_PROTOBUF_DIR ${Protobuf_INCLUDE_DIR}/) +set(GOOGLE_PROTOBUF_DIR "${Protobuf_INCLUDE_DIR}/") set(ORC_ADDITION_SOURCE_DIR ${CMAKE_CURRENT_BINARY_DIR}) -set(ARROW_SRC_DIR ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src) +set(ARROW_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src") set(PROTOBUF_EXECUTABLE ${Protobuf_PROTOC_EXECUTABLE}) -set(PROTO_DIR ${ORC_SOURCE_DIR}/../proto) +set(PROTO_DIR "${ORC_SOURCE_DIR}/../proto") add_custom_command(OUTPUT orc_proto.pb.h orc_proto.pb.cc @@ -75,9 +75,9 @@ add_custom_command(OUTPUT orc_proto.pb.h orc_proto.pb.cc # === flatbuffers -set(FLATBUFFERS_SRC_DIR ${ClickHouse_SOURCE_DIR}/contrib/flatbuffers) -set(FLATBUFFERS_BINARY_DIR ${ClickHouse_BINARY_DIR}/contrib/flatbuffers) -set(FLATBUFFERS_INCLUDE_DIR ${FLATBUFFERS_SRC_DIR}/include) +set(FLATBUFFERS_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/flatbuffers") +set(FLATBUFFERS_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/flatbuffers") +set(FLATBUFFERS_INCLUDE_DIR "${FLATBUFFERS_SRC_DIR}/include") # set flatbuffers CMake options if (MAKE_STATIC_LIBRARIES) @@ -101,187 +101,187 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang") set(CXX11_FLAGS "-std=c++0x") endif () -include(${ClickHouse_SOURCE_DIR}/contrib/orc/cmake_modules/CheckSourceCompiles.cmake) +include("${ClickHouse_SOURCE_DIR}/contrib/orc/cmake_modules/CheckSourceCompiles.cmake") include(orc_check.cmake) configure_file("${ORC_INCLUDE_DIR}/orc/orc-config.hh.in" "${ORC_BUILD_INCLUDE_DIR}/orc/orc-config.hh") configure_file("${ORC_SOURCE_SRC_DIR}/Adaptor.hh.in" "${ORC_BUILD_INCLUDE_DIR}/Adaptor.hh") set(ORC_SRCS - ${ARROW_SRC_DIR}/arrow/adapters/orc/adapter.cc - ${ARROW_SRC_DIR}/arrow/adapters/orc/adapter_util.cc - ${ORC_SOURCE_SRC_DIR}/Exceptions.cc - ${ORC_SOURCE_SRC_DIR}/OrcFile.cc - ${ORC_SOURCE_SRC_DIR}/Reader.cc - ${ORC_SOURCE_SRC_DIR}/ByteRLE.cc - ${ORC_SOURCE_SRC_DIR}/ColumnPrinter.cc - ${ORC_SOURCE_SRC_DIR}/ColumnReader.cc - ${ORC_SOURCE_SRC_DIR}/ColumnWriter.cc - ${ORC_SOURCE_SRC_DIR}/Common.cc - ${ORC_SOURCE_SRC_DIR}/Compression.cc - ${ORC_SOURCE_SRC_DIR}/Exceptions.cc - ${ORC_SOURCE_SRC_DIR}/Int128.cc - ${ORC_SOURCE_SRC_DIR}/LzoDecompressor.cc - ${ORC_SOURCE_SRC_DIR}/MemoryPool.cc - ${ORC_SOURCE_SRC_DIR}/OrcFile.cc - ${ORC_SOURCE_SRC_DIR}/Reader.cc - ${ORC_SOURCE_SRC_DIR}/RLE.cc - ${ORC_SOURCE_SRC_DIR}/RLEv1.cc - ${ORC_SOURCE_SRC_DIR}/RLEv2.cc - ${ORC_SOURCE_SRC_DIR}/Statistics.cc - ${ORC_SOURCE_SRC_DIR}/StripeStream.cc - ${ORC_SOURCE_SRC_DIR}/Timezone.cc - ${ORC_SOURCE_SRC_DIR}/TypeImpl.cc - ${ORC_SOURCE_SRC_DIR}/Vector.cc - ${ORC_SOURCE_SRC_DIR}/Writer.cc - ${ORC_SOURCE_SRC_DIR}/io/InputStream.cc - ${ORC_SOURCE_SRC_DIR}/io/OutputStream.cc - ${ORC_ADDITION_SOURCE_DIR}/orc_proto.pb.cc + "${ARROW_SRC_DIR}/arrow/adapters/orc/adapter.cc" + "${ARROW_SRC_DIR}/arrow/adapters/orc/adapter_util.cc" + "${ORC_SOURCE_SRC_DIR}/Exceptions.cc" + "${ORC_SOURCE_SRC_DIR}/OrcFile.cc" + "${ORC_SOURCE_SRC_DIR}/Reader.cc" + "${ORC_SOURCE_SRC_DIR}/ByteRLE.cc" + 
"${ORC_SOURCE_SRC_DIR}/ColumnPrinter.cc" + "${ORC_SOURCE_SRC_DIR}/ColumnReader.cc" + "${ORC_SOURCE_SRC_DIR}/ColumnWriter.cc" + "${ORC_SOURCE_SRC_DIR}/Common.cc" + "${ORC_SOURCE_SRC_DIR}/Compression.cc" + "${ORC_SOURCE_SRC_DIR}/Exceptions.cc" + "${ORC_SOURCE_SRC_DIR}/Int128.cc" + "${ORC_SOURCE_SRC_DIR}/LzoDecompressor.cc" + "${ORC_SOURCE_SRC_DIR}/MemoryPool.cc" + "${ORC_SOURCE_SRC_DIR}/OrcFile.cc" + "${ORC_SOURCE_SRC_DIR}/Reader.cc" + "${ORC_SOURCE_SRC_DIR}/RLE.cc" + "${ORC_SOURCE_SRC_DIR}/RLEv1.cc" + "${ORC_SOURCE_SRC_DIR}/RLEv2.cc" + "${ORC_SOURCE_SRC_DIR}/Statistics.cc" + "${ORC_SOURCE_SRC_DIR}/StripeStream.cc" + "${ORC_SOURCE_SRC_DIR}/Timezone.cc" + "${ORC_SOURCE_SRC_DIR}/TypeImpl.cc" + "${ORC_SOURCE_SRC_DIR}/Vector.cc" + "${ORC_SOURCE_SRC_DIR}/Writer.cc" + "${ORC_SOURCE_SRC_DIR}/io/InputStream.cc" + "${ORC_SOURCE_SRC_DIR}/io/OutputStream.cc" + "${ORC_ADDITION_SOURCE_DIR}/orc_proto.pb.cc" ) # === arrow -set(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src/arrow) +set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src/arrow") configure_file("${LIBRARY_DIR}/util/config.h.cmake" "${CMAKE_CURRENT_BINARY_DIR}/cpp/src/arrow/util/config.h") # arrow/cpp/src/arrow/CMakeLists.txt set(ARROW_SRCS - ${LIBRARY_DIR}/buffer.cc - ${LIBRARY_DIR}/builder.cc - ${LIBRARY_DIR}/chunked_array.cc - ${LIBRARY_DIR}/compare.cc - ${LIBRARY_DIR}/datum.cc - ${LIBRARY_DIR}/device.cc - ${LIBRARY_DIR}/extension_type.cc - ${LIBRARY_DIR}/memory_pool.cc - ${LIBRARY_DIR}/pretty_print.cc - ${LIBRARY_DIR}/record_batch.cc - ${LIBRARY_DIR}/result.cc - ${LIBRARY_DIR}/scalar.cc - ${LIBRARY_DIR}/sparse_tensor.cc - ${LIBRARY_DIR}/status.cc - ${LIBRARY_DIR}/table_builder.cc - ${LIBRARY_DIR}/table.cc - ${LIBRARY_DIR}/tensor.cc - ${LIBRARY_DIR}/type.cc - ${LIBRARY_DIR}/visitor.cc + "${LIBRARY_DIR}/buffer.cc" + "${LIBRARY_DIR}/builder.cc" + "${LIBRARY_DIR}/chunked_array.cc" + "${LIBRARY_DIR}/compare.cc" + "${LIBRARY_DIR}/datum.cc" + "${LIBRARY_DIR}/device.cc" + "${LIBRARY_DIR}/extension_type.cc" + "${LIBRARY_DIR}/memory_pool.cc" + "${LIBRARY_DIR}/pretty_print.cc" + "${LIBRARY_DIR}/record_batch.cc" + "${LIBRARY_DIR}/result.cc" + "${LIBRARY_DIR}/scalar.cc" + "${LIBRARY_DIR}/sparse_tensor.cc" + "${LIBRARY_DIR}/status.cc" + "${LIBRARY_DIR}/table_builder.cc" + "${LIBRARY_DIR}/table.cc" + "${LIBRARY_DIR}/tensor.cc" + "${LIBRARY_DIR}/type.cc" + "${LIBRARY_DIR}/visitor.cc" - ${LIBRARY_DIR}/array/array_base.cc - ${LIBRARY_DIR}/array/array_binary.cc - ${LIBRARY_DIR}/array/array_decimal.cc - ${LIBRARY_DIR}/array/array_dict.cc - ${LIBRARY_DIR}/array/array_nested.cc - ${LIBRARY_DIR}/array/array_primitive.cc - ${LIBRARY_DIR}/array/builder_adaptive.cc - ${LIBRARY_DIR}/array/builder_base.cc - ${LIBRARY_DIR}/array/builder_binary.cc - ${LIBRARY_DIR}/array/builder_decimal.cc - ${LIBRARY_DIR}/array/builder_dict.cc - ${LIBRARY_DIR}/array/builder_nested.cc - ${LIBRARY_DIR}/array/builder_primitive.cc - ${LIBRARY_DIR}/array/builder_union.cc - ${LIBRARY_DIR}/array/concatenate.cc - ${LIBRARY_DIR}/array/data.cc - ${LIBRARY_DIR}/array/diff.cc - ${LIBRARY_DIR}/array/util.cc - ${LIBRARY_DIR}/array/validate.cc + "${LIBRARY_DIR}/array/array_base.cc" + "${LIBRARY_DIR}/array/array_binary.cc" + "${LIBRARY_DIR}/array/array_decimal.cc" + "${LIBRARY_DIR}/array/array_dict.cc" + "${LIBRARY_DIR}/array/array_nested.cc" + "${LIBRARY_DIR}/array/array_primitive.cc" + "${LIBRARY_DIR}/array/builder_adaptive.cc" + "${LIBRARY_DIR}/array/builder_base.cc" + "${LIBRARY_DIR}/array/builder_binary.cc" + "${LIBRARY_DIR}/array/builder_decimal.cc" + 
"${LIBRARY_DIR}/array/builder_dict.cc" + "${LIBRARY_DIR}/array/builder_nested.cc" + "${LIBRARY_DIR}/array/builder_primitive.cc" + "${LIBRARY_DIR}/array/builder_union.cc" + "${LIBRARY_DIR}/array/concatenate.cc" + "${LIBRARY_DIR}/array/data.cc" + "${LIBRARY_DIR}/array/diff.cc" + "${LIBRARY_DIR}/array/util.cc" + "${LIBRARY_DIR}/array/validate.cc" - ${LIBRARY_DIR}/compute/api_scalar.cc - ${LIBRARY_DIR}/compute/api_vector.cc - ${LIBRARY_DIR}/compute/cast.cc - ${LIBRARY_DIR}/compute/exec.cc - ${LIBRARY_DIR}/compute/function.cc - ${LIBRARY_DIR}/compute/kernel.cc - ${LIBRARY_DIR}/compute/registry.cc + "${LIBRARY_DIR}/compute/api_scalar.cc" + "${LIBRARY_DIR}/compute/api_vector.cc" + "${LIBRARY_DIR}/compute/cast.cc" + "${LIBRARY_DIR}/compute/exec.cc" + "${LIBRARY_DIR}/compute/function.cc" + "${LIBRARY_DIR}/compute/kernel.cc" + "${LIBRARY_DIR}/compute/registry.cc" - ${LIBRARY_DIR}/compute/kernels/aggregate_basic.cc - ${LIBRARY_DIR}/compute/kernels/aggregate_mode.cc - ${LIBRARY_DIR}/compute/kernels/aggregate_var_std.cc - ${LIBRARY_DIR}/compute/kernels/codegen_internal.cc - ${LIBRARY_DIR}/compute/kernels/scalar_arithmetic.cc - ${LIBRARY_DIR}/compute/kernels/scalar_boolean.cc - ${LIBRARY_DIR}/compute/kernels/scalar_cast_boolean.cc - ${LIBRARY_DIR}/compute/kernels/scalar_cast_internal.cc - ${LIBRARY_DIR}/compute/kernels/scalar_cast_nested.cc - ${LIBRARY_DIR}/compute/kernels/scalar_cast_numeric.cc - ${LIBRARY_DIR}/compute/kernels/scalar_cast_string.cc - ${LIBRARY_DIR}/compute/kernels/scalar_cast_temporal.cc - ${LIBRARY_DIR}/compute/kernels/scalar_compare.cc - ${LIBRARY_DIR}/compute/kernels/scalar_fill_null.cc - ${LIBRARY_DIR}/compute/kernels/scalar_nested.cc - ${LIBRARY_DIR}/compute/kernels/scalar_set_lookup.cc - ${LIBRARY_DIR}/compute/kernels/scalar_string.cc - ${LIBRARY_DIR}/compute/kernels/scalar_validity.cc - ${LIBRARY_DIR}/compute/kernels/vector_hash.cc - ${LIBRARY_DIR}/compute/kernels/vector_nested.cc - ${LIBRARY_DIR}/compute/kernels/vector_selection.cc - ${LIBRARY_DIR}/compute/kernels/vector_sort.cc - ${LIBRARY_DIR}/compute/kernels/util_internal.cc + "${LIBRARY_DIR}/compute/kernels/aggregate_basic.cc" + "${LIBRARY_DIR}/compute/kernels/aggregate_mode.cc" + "${LIBRARY_DIR}/compute/kernels/aggregate_var_std.cc" + "${LIBRARY_DIR}/compute/kernels/codegen_internal.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_arithmetic.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_boolean.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_cast_boolean.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_cast_internal.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_cast_nested.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_cast_numeric.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_cast_string.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_cast_temporal.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_compare.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_fill_null.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_nested.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_set_lookup.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_string.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_validity.cc" + "${LIBRARY_DIR}/compute/kernels/vector_hash.cc" + "${LIBRARY_DIR}/compute/kernels/vector_nested.cc" + "${LIBRARY_DIR}/compute/kernels/vector_selection.cc" + "${LIBRARY_DIR}/compute/kernels/vector_sort.cc" + "${LIBRARY_DIR}/compute/kernels/util_internal.cc" - ${LIBRARY_DIR}/csv/chunker.cc - ${LIBRARY_DIR}/csv/column_builder.cc - ${LIBRARY_DIR}/csv/column_decoder.cc - ${LIBRARY_DIR}/csv/converter.cc - ${LIBRARY_DIR}/csv/options.cc - ${LIBRARY_DIR}/csv/parser.cc - 
${LIBRARY_DIR}/csv/reader.cc + "${LIBRARY_DIR}/csv/chunker.cc" + "${LIBRARY_DIR}/csv/column_builder.cc" + "${LIBRARY_DIR}/csv/column_decoder.cc" + "${LIBRARY_DIR}/csv/converter.cc" + "${LIBRARY_DIR}/csv/options.cc" + "${LIBRARY_DIR}/csv/parser.cc" + "${LIBRARY_DIR}/csv/reader.cc" - ${LIBRARY_DIR}/ipc/dictionary.cc - ${LIBRARY_DIR}/ipc/feather.cc - ${LIBRARY_DIR}/ipc/message.cc - ${LIBRARY_DIR}/ipc/metadata_internal.cc - ${LIBRARY_DIR}/ipc/options.cc - ${LIBRARY_DIR}/ipc/reader.cc - ${LIBRARY_DIR}/ipc/writer.cc + "${LIBRARY_DIR}/ipc/dictionary.cc" + "${LIBRARY_DIR}/ipc/feather.cc" + "${LIBRARY_DIR}/ipc/message.cc" + "${LIBRARY_DIR}/ipc/metadata_internal.cc" + "${LIBRARY_DIR}/ipc/options.cc" + "${LIBRARY_DIR}/ipc/reader.cc" + "${LIBRARY_DIR}/ipc/writer.cc" - ${LIBRARY_DIR}/io/buffered.cc - ${LIBRARY_DIR}/io/caching.cc - ${LIBRARY_DIR}/io/compressed.cc - ${LIBRARY_DIR}/io/file.cc - ${LIBRARY_DIR}/io/interfaces.cc - ${LIBRARY_DIR}/io/memory.cc - ${LIBRARY_DIR}/io/slow.cc + "${LIBRARY_DIR}/io/buffered.cc" + "${LIBRARY_DIR}/io/caching.cc" + "${LIBRARY_DIR}/io/compressed.cc" + "${LIBRARY_DIR}/io/file.cc" + "${LIBRARY_DIR}/io/interfaces.cc" + "${LIBRARY_DIR}/io/memory.cc" + "${LIBRARY_DIR}/io/slow.cc" - ${LIBRARY_DIR}/tensor/coo_converter.cc - ${LIBRARY_DIR}/tensor/csf_converter.cc - ${LIBRARY_DIR}/tensor/csx_converter.cc + "${LIBRARY_DIR}/tensor/coo_converter.cc" + "${LIBRARY_DIR}/tensor/csf_converter.cc" + "${LIBRARY_DIR}/tensor/csx_converter.cc" - ${LIBRARY_DIR}/util/basic_decimal.cc - ${LIBRARY_DIR}/util/bit_block_counter.cc - ${LIBRARY_DIR}/util/bit_run_reader.cc - ${LIBRARY_DIR}/util/bit_util.cc - ${LIBRARY_DIR}/util/bitmap.cc - ${LIBRARY_DIR}/util/bitmap_builders.cc - ${LIBRARY_DIR}/util/bitmap_ops.cc - ${LIBRARY_DIR}/util/bpacking.cc - ${LIBRARY_DIR}/util/compression.cc - ${LIBRARY_DIR}/util/compression_lz4.cc - ${LIBRARY_DIR}/util/compression_snappy.cc - ${LIBRARY_DIR}/util/compression_zlib.cc - ${LIBRARY_DIR}/util/compression_zstd.cc - ${LIBRARY_DIR}/util/cpu_info.cc - ${LIBRARY_DIR}/util/decimal.cc - ${LIBRARY_DIR}/util/delimiting.cc - ${LIBRARY_DIR}/util/formatting.cc - ${LIBRARY_DIR}/util/future.cc - ${LIBRARY_DIR}/util/int_util.cc - ${LIBRARY_DIR}/util/io_util.cc - ${LIBRARY_DIR}/util/iterator.cc - ${LIBRARY_DIR}/util/key_value_metadata.cc - ${LIBRARY_DIR}/util/logging.cc - ${LIBRARY_DIR}/util/memory.cc - ${LIBRARY_DIR}/util/string_builder.cc - ${LIBRARY_DIR}/util/string.cc - ${LIBRARY_DIR}/util/task_group.cc - ${LIBRARY_DIR}/util/thread_pool.cc - ${LIBRARY_DIR}/util/time.cc - ${LIBRARY_DIR}/util/trie.cc - ${LIBRARY_DIR}/util/utf8.cc - ${LIBRARY_DIR}/util/value_parsing.cc + "${LIBRARY_DIR}/util/basic_decimal.cc" + "${LIBRARY_DIR}/util/bit_block_counter.cc" + "${LIBRARY_DIR}/util/bit_run_reader.cc" + "${LIBRARY_DIR}/util/bit_util.cc" + "${LIBRARY_DIR}/util/bitmap.cc" + "${LIBRARY_DIR}/util/bitmap_builders.cc" + "${LIBRARY_DIR}/util/bitmap_ops.cc" + "${LIBRARY_DIR}/util/bpacking.cc" + "${LIBRARY_DIR}/util/compression.cc" + "${LIBRARY_DIR}/util/compression_lz4.cc" + "${LIBRARY_DIR}/util/compression_snappy.cc" + "${LIBRARY_DIR}/util/compression_zlib.cc" + "${LIBRARY_DIR}/util/compression_zstd.cc" + "${LIBRARY_DIR}/util/cpu_info.cc" + "${LIBRARY_DIR}/util/decimal.cc" + "${LIBRARY_DIR}/util/delimiting.cc" + "${LIBRARY_DIR}/util/formatting.cc" + "${LIBRARY_DIR}/util/future.cc" + "${LIBRARY_DIR}/util/int_util.cc" + "${LIBRARY_DIR}/util/io_util.cc" + "${LIBRARY_DIR}/util/iterator.cc" + "${LIBRARY_DIR}/util/key_value_metadata.cc" + "${LIBRARY_DIR}/util/logging.cc" + 
"${LIBRARY_DIR}/util/memory.cc" + "${LIBRARY_DIR}/util/string_builder.cc" + "${LIBRARY_DIR}/util/string.cc" + "${LIBRARY_DIR}/util/task_group.cc" + "${LIBRARY_DIR}/util/thread_pool.cc" + "${LIBRARY_DIR}/util/time.cc" + "${LIBRARY_DIR}/util/trie.cc" + "${LIBRARY_DIR}/util/utf8.cc" + "${LIBRARY_DIR}/util/value_parsing.cc" - ${LIBRARY_DIR}/vendored/base64.cpp + "${LIBRARY_DIR}/vendored/base64.cpp" ${ORC_SRCS} ) @@ -298,21 +298,21 @@ if (ZSTD_INCLUDE_DIR AND ZSTD_LIBRARY) endif () add_definitions(-DARROW_WITH_LZ4) -SET(ARROW_SRCS ${LIBRARY_DIR}/util/compression_lz4.cc ${ARROW_SRCS}) +SET(ARROW_SRCS "${LIBRARY_DIR}/util/compression_lz4.cc" ${ARROW_SRCS}) if (ARROW_WITH_SNAPPY) add_definitions(-DARROW_WITH_SNAPPY) - SET(ARROW_SRCS ${LIBRARY_DIR}/util/compression_snappy.cc ${ARROW_SRCS}) + SET(ARROW_SRCS "${LIBRARY_DIR}/util/compression_snappy.cc" ${ARROW_SRCS}) endif () if (ARROW_WITH_ZLIB) add_definitions(-DARROW_WITH_ZLIB) - SET(ARROW_SRCS ${LIBRARY_DIR}/util/compression_zlib.cc ${ARROW_SRCS}) + SET(ARROW_SRCS "${LIBRARY_DIR}/util/compression_zlib.cc" ${ARROW_SRCS}) endif () if (ARROW_WITH_ZSTD) add_definitions(-DARROW_WITH_ZSTD) - SET(ARROW_SRCS ${LIBRARY_DIR}/util/compression_zstd.cc ${ARROW_SRCS}) + SET(ARROW_SRCS "${LIBRARY_DIR}/util/compression_zstd.cc" ${ARROW_SRCS}) endif () @@ -327,8 +327,8 @@ if (USE_INTERNAL_PROTOBUF_LIBRARY) add_dependencies(${ARROW_LIBRARY} protoc) endif () -target_include_directories(${ARROW_LIBRARY} SYSTEM PUBLIC ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src) -target_include_directories(${ARROW_LIBRARY} SYSTEM PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/cpp/src) +target_include_directories(${ARROW_LIBRARY} SYSTEM PUBLIC "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src") +target_include_directories(${ARROW_LIBRARY} SYSTEM PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/cpp/src") target_link_libraries(${ARROW_LIBRARY} PRIVATE ${DOUBLE_CONVERSION_LIBRARIES} ${Protobuf_LIBRARY}) target_link_libraries(${ARROW_LIBRARY} PRIVATE lz4) if (ARROW_WITH_SNAPPY) @@ -354,46 +354,46 @@ target_include_directories(${ARROW_LIBRARY} PRIVATE SYSTEM ${FLATBUFFERS_INCLUDE # === parquet -set(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src/parquet) -set(GEN_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src/generated) +set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src/parquet") +set(GEN_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src/generated") # arrow/cpp/src/parquet/CMakeLists.txt set(PARQUET_SRCS - ${LIBRARY_DIR}/arrow/path_internal.cc - ${LIBRARY_DIR}/arrow/reader.cc - ${LIBRARY_DIR}/arrow/reader_internal.cc - ${LIBRARY_DIR}/arrow/schema.cc - ${LIBRARY_DIR}/arrow/schema_internal.cc - ${LIBRARY_DIR}/arrow/writer.cc - ${LIBRARY_DIR}/bloom_filter.cc - ${LIBRARY_DIR}/column_reader.cc - ${LIBRARY_DIR}/column_scanner.cc - ${LIBRARY_DIR}/column_writer.cc - ${LIBRARY_DIR}/deprecated_io.cc - ${LIBRARY_DIR}/encoding.cc - ${LIBRARY_DIR}/encryption.cc - ${LIBRARY_DIR}/encryption_internal.cc - ${LIBRARY_DIR}/file_reader.cc - ${LIBRARY_DIR}/file_writer.cc - ${LIBRARY_DIR}/internal_file_decryptor.cc - ${LIBRARY_DIR}/internal_file_encryptor.cc - ${LIBRARY_DIR}/level_conversion.cc - ${LIBRARY_DIR}/level_comparison.cc - ${LIBRARY_DIR}/metadata.cc - ${LIBRARY_DIR}/murmur3.cc - ${LIBRARY_DIR}/platform.cc - ${LIBRARY_DIR}/printer.cc - ${LIBRARY_DIR}/properties.cc - ${LIBRARY_DIR}/schema.cc - ${LIBRARY_DIR}/statistics.cc - ${LIBRARY_DIR}/types.cc + "${LIBRARY_DIR}/arrow/path_internal.cc" + "${LIBRARY_DIR}/arrow/reader.cc" + "${LIBRARY_DIR}/arrow/reader_internal.cc" + 
"${LIBRARY_DIR}/arrow/schema.cc" + "${LIBRARY_DIR}/arrow/schema_internal.cc" + "${LIBRARY_DIR}/arrow/writer.cc" + "${LIBRARY_DIR}/bloom_filter.cc" + "${LIBRARY_DIR}/column_reader.cc" + "${LIBRARY_DIR}/column_scanner.cc" + "${LIBRARY_DIR}/column_writer.cc" + "${LIBRARY_DIR}/deprecated_io.cc" + "${LIBRARY_DIR}/encoding.cc" + "${LIBRARY_DIR}/encryption.cc" + "${LIBRARY_DIR}/encryption_internal.cc" + "${LIBRARY_DIR}/file_reader.cc" + "${LIBRARY_DIR}/file_writer.cc" + "${LIBRARY_DIR}/internal_file_decryptor.cc" + "${LIBRARY_DIR}/internal_file_encryptor.cc" + "${LIBRARY_DIR}/level_conversion.cc" + "${LIBRARY_DIR}/level_comparison.cc" + "${LIBRARY_DIR}/metadata.cc" + "${LIBRARY_DIR}/murmur3.cc" + "${LIBRARY_DIR}/platform.cc" + "${LIBRARY_DIR}/printer.cc" + "${LIBRARY_DIR}/properties.cc" + "${LIBRARY_DIR}/schema.cc" + "${LIBRARY_DIR}/statistics.cc" + "${LIBRARY_DIR}/types.cc" - ${GEN_LIBRARY_DIR}/parquet_constants.cpp - ${GEN_LIBRARY_DIR}/parquet_types.cpp + "${GEN_LIBRARY_DIR}/parquet_constants.cpp" + "${GEN_LIBRARY_DIR}/parquet_types.cpp" ) -#list(TRANSFORM PARQUET_SRCS PREPEND ${LIBRARY_DIR}/) # cmake 3.12 +#list(TRANSFORM PARQUET_SRCS PREPEND "${LIBRARY_DIR}/") # cmake 3.12 add_library(${PARQUET_LIBRARY} ${PARQUET_SRCS}) -target_include_directories(${PARQUET_LIBRARY} SYSTEM PUBLIC ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src ${CMAKE_CURRENT_SOURCE_DIR}/cpp/src PRIVATE ${OPENSSL_INCLUDE_DIR}) -include(${ClickHouse_SOURCE_DIR}/contrib/thrift/build/cmake/ConfigureChecks.cmake) # makes config.h +target_include_directories(${PARQUET_LIBRARY} SYSTEM PUBLIC "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src" "${CMAKE_CURRENT_SOURCE_DIR}/cpp/src" PRIVATE ${OPENSSL_INCLUDE_DIR}) +include("${ClickHouse_SOURCE_DIR}/contrib/thrift/build/cmake/ConfigureChecks.cmake") # makes config.h target_link_libraries(${PARQUET_LIBRARY} PUBLIC ${ARROW_LIBRARY} PRIVATE ${THRIFT_LIBRARY} boost::headers_only boost::regex ${OPENSSL_LIBRARIES}) if (SANITIZE STREQUAL "undefined") @@ -403,9 +403,9 @@ endif () # === tools -set(TOOLS_DIR ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/tools/parquet) +set(TOOLS_DIR "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/tools/parquet") set(PARQUET_TOOLS parquet_dump_schema parquet_reader parquet_scan) foreach (TOOL ${PARQUET_TOOLS}) - add_executable(${TOOL} ${TOOLS_DIR}/${TOOL}.cc) + add_executable(${TOOL} "${TOOLS_DIR}/${TOOL}.cc") target_link_libraries(${TOOL} PRIVATE ${PARQUET_LIBRARY}) endforeach () diff --git a/contrib/avro b/contrib/avro index 92caca2d42f..1ee16d8c5a7 160000 --- a/contrib/avro +++ b/contrib/avro @@ -1 +1 @@ -Subproject commit 92caca2d42fc9a97e34e95f963593539d32ed331 +Subproject commit 1ee16d8c5a7808acff5cf0475f771195d9aa3faa diff --git a/contrib/avro-cmake/CMakeLists.txt b/contrib/avro-cmake/CMakeLists.txt index 052a19ee804..b56afd1598c 100644 --- a/contrib/avro-cmake/CMakeLists.txt +++ b/contrib/avro-cmake/CMakeLists.txt @@ -1,10 +1,10 @@ -set(AVROCPP_ROOT_DIR ${CMAKE_SOURCE_DIR}/contrib/avro/lang/c++) -set(AVROCPP_INCLUDE_DIR ${AVROCPP_ROOT_DIR}/api) -set(AVROCPP_SOURCE_DIR ${AVROCPP_ROOT_DIR}/impl) +set(AVROCPP_ROOT_DIR "${CMAKE_SOURCE_DIR}/contrib/avro/lang/c++") +set(AVROCPP_INCLUDE_DIR "${AVROCPP_ROOT_DIR}/api") +set(AVROCPP_SOURCE_DIR "${AVROCPP_ROOT_DIR}/impl") set (CMAKE_CXX_STANDARD 17) -if (EXISTS ${AVROCPP_ROOT_DIR}/../../share/VERSION.txt) +if (EXISTS "${AVROCPP_ROOT_DIR}/../../share/VERSION.txt") file(READ "${AVROCPP_ROOT_DIR}/../../share/VERSION.txt" AVRO_VERSION) endif() @@ -14,30 +14,30 @@ set (AVRO_VERSION_MAJOR ${AVRO_VERSION}) set (AVRO_VERSION_MINOR 
"0") set (AVROCPP_SOURCE_FILES - ${AVROCPP_SOURCE_DIR}/Compiler.cc - ${AVROCPP_SOURCE_DIR}/Node.cc - ${AVROCPP_SOURCE_DIR}/LogicalType.cc - ${AVROCPP_SOURCE_DIR}/NodeImpl.cc - ${AVROCPP_SOURCE_DIR}/ResolverSchema.cc - ${AVROCPP_SOURCE_DIR}/Schema.cc - ${AVROCPP_SOURCE_DIR}/Types.cc - ${AVROCPP_SOURCE_DIR}/ValidSchema.cc - ${AVROCPP_SOURCE_DIR}/Zigzag.cc - ${AVROCPP_SOURCE_DIR}/BinaryEncoder.cc - ${AVROCPP_SOURCE_DIR}/BinaryDecoder.cc - ${AVROCPP_SOURCE_DIR}/Stream.cc - ${AVROCPP_SOURCE_DIR}/FileStream.cc - ${AVROCPP_SOURCE_DIR}/Generic.cc - ${AVROCPP_SOURCE_DIR}/GenericDatum.cc - ${AVROCPP_SOURCE_DIR}/DataFile.cc - ${AVROCPP_SOURCE_DIR}/parsing/Symbol.cc - ${AVROCPP_SOURCE_DIR}/parsing/ValidatingCodec.cc - ${AVROCPP_SOURCE_DIR}/parsing/JsonCodec.cc - ${AVROCPP_SOURCE_DIR}/parsing/ResolvingDecoder.cc - ${AVROCPP_SOURCE_DIR}/json/JsonIO.cc - ${AVROCPP_SOURCE_DIR}/json/JsonDom.cc - ${AVROCPP_SOURCE_DIR}/Resolver.cc - ${AVROCPP_SOURCE_DIR}/Validator.cc + "${AVROCPP_SOURCE_DIR}/Compiler.cc" + "${AVROCPP_SOURCE_DIR}/Node.cc" + "${AVROCPP_SOURCE_DIR}/LogicalType.cc" + "${AVROCPP_SOURCE_DIR}/NodeImpl.cc" + "${AVROCPP_SOURCE_DIR}/ResolverSchema.cc" + "${AVROCPP_SOURCE_DIR}/Schema.cc" + "${AVROCPP_SOURCE_DIR}/Types.cc" + "${AVROCPP_SOURCE_DIR}/ValidSchema.cc" + "${AVROCPP_SOURCE_DIR}/Zigzag.cc" + "${AVROCPP_SOURCE_DIR}/BinaryEncoder.cc" + "${AVROCPP_SOURCE_DIR}/BinaryDecoder.cc" + "${AVROCPP_SOURCE_DIR}/Stream.cc" + "${AVROCPP_SOURCE_DIR}/FileStream.cc" + "${AVROCPP_SOURCE_DIR}/Generic.cc" + "${AVROCPP_SOURCE_DIR}/GenericDatum.cc" + "${AVROCPP_SOURCE_DIR}/DataFile.cc" + "${AVROCPP_SOURCE_DIR}/parsing/Symbol.cc" + "${AVROCPP_SOURCE_DIR}/parsing/ValidatingCodec.cc" + "${AVROCPP_SOURCE_DIR}/parsing/JsonCodec.cc" + "${AVROCPP_SOURCE_DIR}/parsing/ResolvingDecoder.cc" + "${AVROCPP_SOURCE_DIR}/json/JsonIO.cc" + "${AVROCPP_SOURCE_DIR}/json/JsonDom.cc" + "${AVROCPP_SOURCE_DIR}/Resolver.cc" + "${AVROCPP_SOURCE_DIR}/Validator.cc" ) add_library (avrocpp ${AVROCPP_SOURCE_FILES}) @@ -63,7 +63,7 @@ target_compile_options(avrocpp PRIVATE ${SUPPRESS_WARNINGS}) # create a symlink to include headers with ADD_CUSTOM_TARGET(avro_symlink_headers ALL - COMMAND ${CMAKE_COMMAND} -E make_directory ${AVROCPP_ROOT_DIR}/include - COMMAND ${CMAKE_COMMAND} -E create_symlink ${AVROCPP_ROOT_DIR}/api ${AVROCPP_ROOT_DIR}/include/avro + COMMAND ${CMAKE_COMMAND} -E make_directory "${AVROCPP_ROOT_DIR}/include" + COMMAND ${CMAKE_COMMAND} -E create_symlink "${AVROCPP_ROOT_DIR}/api" "${AVROCPP_ROOT_DIR}/include/avro" ) add_dependencies(avrocpp avro_symlink_headers) diff --git a/contrib/aws-s3-cmake/CMakeLists.txt b/contrib/aws-s3-cmake/CMakeLists.txt index 02dee91c70c..723ceac3991 100644 --- a/contrib/aws-s3-cmake/CMakeLists.txt +++ b/contrib/aws-s3-cmake/CMakeLists.txt @@ -1,8 +1,8 @@ -SET(AWS_S3_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-s3) -SET(AWS_CORE_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-core) -SET(AWS_CHECKSUMS_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/aws-checksums) -SET(AWS_COMMON_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/aws-c-common) -SET(AWS_EVENT_STREAM_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/aws-c-event-stream) +SET(AWS_S3_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-s3") +SET(AWS_CORE_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-core") +SET(AWS_CHECKSUMS_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-checksums") +SET(AWS_COMMON_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-common") +SET(AWS_EVENT_STREAM_LIBRARY_DIR 
"${ClickHouse_SOURCE_DIR}/contrib/aws-c-event-stream") OPTION(USE_AWS_MEMORY_MANAGEMENT "Aws memory management" OFF) configure_file("${AWS_CORE_LIBRARY_DIR}/include/aws/core/SDKConfig.h.in" diff --git a/contrib/base64-cmake/CMakeLists.txt b/contrib/base64-cmake/CMakeLists.txt index a295ee45b84..4ebb4e68728 100644 --- a/contrib/base64-cmake/CMakeLists.txt +++ b/contrib/base64-cmake/CMakeLists.txt @@ -1,11 +1,11 @@ -SET(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/base64) +SET(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/base64") -add_library(base64_scalar OBJECT ${LIBRARY_DIR}/turbob64c.c ${LIBRARY_DIR}/turbob64d.c) -add_library(base64_ssse3 OBJECT ${LIBRARY_DIR}/turbob64sse.c) # This file also contains code for ARM NEON +add_library(base64_scalar OBJECT "${LIBRARY_DIR}/turbob64c.c" "${LIBRARY_DIR}/turbob64d.c") +add_library(base64_ssse3 OBJECT "${LIBRARY_DIR}/turbob64sse.c") # This file also contains code for ARM NEON if (ARCH_AMD64) - add_library(base64_avx OBJECT ${LIBRARY_DIR}/turbob64sse.c) # This is not a mistake. One file is compiled twice. - add_library(base64_avx2 OBJECT ${LIBRARY_DIR}/turbob64avx2.c) + add_library(base64_avx OBJECT "${LIBRARY_DIR}/turbob64sse.c") # This is not a mistake. One file is compiled twice. + add_library(base64_avx2 OBJECT "${LIBRARY_DIR}/turbob64avx2.c") endif () target_compile_options(base64_scalar PRIVATE -falign-loops) diff --git a/contrib/boost b/contrib/boost index ee24fa55bc4..1ccbb5a522a 160000 --- a/contrib/boost +++ b/contrib/boost @@ -1 +1 @@ -Subproject commit ee24fa55bc46e4d2ce7d0d052cc5a0d9b1be8c36 +Subproject commit 1ccbb5a522a571ce83b606dbc2e1011c42ecccfb diff --git a/contrib/boost-cmake/CMakeLists.txt b/contrib/boost-cmake/CMakeLists.txt index b9298f59f2b..9f6c5b1255d 100644 --- a/contrib/boost-cmake/CMakeLists.txt +++ b/contrib/boost-cmake/CMakeLists.txt @@ -56,19 +56,19 @@ endif() if (NOT EXTERNAL_BOOST_FOUND) set (USE_INTERNAL_BOOST_LIBRARY 1) - set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/boost) + set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/boost") # filesystem set (SRCS_FILESYSTEM - ${LIBRARY_DIR}/libs/filesystem/src/codecvt_error_category.cpp - ${LIBRARY_DIR}/libs/filesystem/src/operations.cpp - ${LIBRARY_DIR}/libs/filesystem/src/path_traits.cpp - ${LIBRARY_DIR}/libs/filesystem/src/path.cpp - ${LIBRARY_DIR}/libs/filesystem/src/portability.cpp - ${LIBRARY_DIR}/libs/filesystem/src/unique_path.cpp - ${LIBRARY_DIR}/libs/filesystem/src/utf8_codecvt_facet.cpp - ${LIBRARY_DIR}/libs/filesystem/src/windows_file_codecvt.cpp + "${LIBRARY_DIR}/libs/filesystem/src/codecvt_error_category.cpp" + "${LIBRARY_DIR}/libs/filesystem/src/operations.cpp" + "${LIBRARY_DIR}/libs/filesystem/src/path_traits.cpp" + "${LIBRARY_DIR}/libs/filesystem/src/path.cpp" + "${LIBRARY_DIR}/libs/filesystem/src/portability.cpp" + "${LIBRARY_DIR}/libs/filesystem/src/unique_path.cpp" + "${LIBRARY_DIR}/libs/filesystem/src/utf8_codecvt_facet.cpp" + "${LIBRARY_DIR}/libs/filesystem/src/windows_file_codecvt.cpp" ) add_library (_boost_filesystem ${SRCS_FILESYSTEM}) @@ -88,10 +88,10 @@ if (NOT EXTERNAL_BOOST_FOUND) # iostreams set (SRCS_IOSTREAMS - ${LIBRARY_DIR}/libs/iostreams/src/file_descriptor.cpp - ${LIBRARY_DIR}/libs/iostreams/src/gzip.cpp - ${LIBRARY_DIR}/libs/iostreams/src/mapped_file.cpp - ${LIBRARY_DIR}/libs/iostreams/src/zlib.cpp + "${LIBRARY_DIR}/libs/iostreams/src/file_descriptor.cpp" + "${LIBRARY_DIR}/libs/iostreams/src/gzip.cpp" + "${LIBRARY_DIR}/libs/iostreams/src/mapped_file.cpp" + "${LIBRARY_DIR}/libs/iostreams/src/zlib.cpp" ) add_library 
(_boost_iostreams ${SRCS_IOSTREAMS}) @@ -102,17 +102,17 @@ if (NOT EXTERNAL_BOOST_FOUND) # program_options set (SRCS_PROGRAM_OPTIONS - ${LIBRARY_DIR}/libs/program_options/src/cmdline.cpp - ${LIBRARY_DIR}/libs/program_options/src/config_file.cpp - ${LIBRARY_DIR}/libs/program_options/src/convert.cpp - ${LIBRARY_DIR}/libs/program_options/src/options_description.cpp - ${LIBRARY_DIR}/libs/program_options/src/parsers.cpp - ${LIBRARY_DIR}/libs/program_options/src/positional_options.cpp - ${LIBRARY_DIR}/libs/program_options/src/split.cpp - ${LIBRARY_DIR}/libs/program_options/src/utf8_codecvt_facet.cpp - ${LIBRARY_DIR}/libs/program_options/src/value_semantic.cpp - ${LIBRARY_DIR}/libs/program_options/src/variables_map.cpp - ${LIBRARY_DIR}/libs/program_options/src/winmain.cpp + "${LIBRARY_DIR}/libs/program_options/src/cmdline.cpp" + "${LIBRARY_DIR}/libs/program_options/src/config_file.cpp" + "${LIBRARY_DIR}/libs/program_options/src/convert.cpp" + "${LIBRARY_DIR}/libs/program_options/src/options_description.cpp" + "${LIBRARY_DIR}/libs/program_options/src/parsers.cpp" + "${LIBRARY_DIR}/libs/program_options/src/positional_options.cpp" + "${LIBRARY_DIR}/libs/program_options/src/split.cpp" + "${LIBRARY_DIR}/libs/program_options/src/utf8_codecvt_facet.cpp" + "${LIBRARY_DIR}/libs/program_options/src/value_semantic.cpp" + "${LIBRARY_DIR}/libs/program_options/src/variables_map.cpp" + "${LIBRARY_DIR}/libs/program_options/src/winmain.cpp" ) add_library (_boost_program_options ${SRCS_PROGRAM_OPTIONS}) @@ -122,24 +122,24 @@ if (NOT EXTERNAL_BOOST_FOUND) # regex set (SRCS_REGEX - ${LIBRARY_DIR}/libs/regex/src/c_regex_traits.cpp - ${LIBRARY_DIR}/libs/regex/src/cpp_regex_traits.cpp - ${LIBRARY_DIR}/libs/regex/src/cregex.cpp - ${LIBRARY_DIR}/libs/regex/src/fileiter.cpp - ${LIBRARY_DIR}/libs/regex/src/icu.cpp - ${LIBRARY_DIR}/libs/regex/src/instances.cpp - ${LIBRARY_DIR}/libs/regex/src/internals.hpp - ${LIBRARY_DIR}/libs/regex/src/posix_api.cpp - ${LIBRARY_DIR}/libs/regex/src/regex_debug.cpp - ${LIBRARY_DIR}/libs/regex/src/regex_raw_buffer.cpp - ${LIBRARY_DIR}/libs/regex/src/regex_traits_defaults.cpp - ${LIBRARY_DIR}/libs/regex/src/regex.cpp - ${LIBRARY_DIR}/libs/regex/src/static_mutex.cpp - ${LIBRARY_DIR}/libs/regex/src/usinstances.cpp - ${LIBRARY_DIR}/libs/regex/src/w32_regex_traits.cpp - ${LIBRARY_DIR}/libs/regex/src/wc_regex_traits.cpp - ${LIBRARY_DIR}/libs/regex/src/wide_posix_api.cpp - ${LIBRARY_DIR}/libs/regex/src/winstances.cpp + "${LIBRARY_DIR}/libs/regex/src/c_regex_traits.cpp" + "${LIBRARY_DIR}/libs/regex/src/cpp_regex_traits.cpp" + "${LIBRARY_DIR}/libs/regex/src/cregex.cpp" + "${LIBRARY_DIR}/libs/regex/src/fileiter.cpp" + "${LIBRARY_DIR}/libs/regex/src/icu.cpp" + "${LIBRARY_DIR}/libs/regex/src/instances.cpp" + "${LIBRARY_DIR}/libs/regex/src/internals.hpp" + "${LIBRARY_DIR}/libs/regex/src/posix_api.cpp" + "${LIBRARY_DIR}/libs/regex/src/regex_debug.cpp" + "${LIBRARY_DIR}/libs/regex/src/regex_raw_buffer.cpp" + "${LIBRARY_DIR}/libs/regex/src/regex_traits_defaults.cpp" + "${LIBRARY_DIR}/libs/regex/src/regex.cpp" + "${LIBRARY_DIR}/libs/regex/src/static_mutex.cpp" + "${LIBRARY_DIR}/libs/regex/src/usinstances.cpp" + "${LIBRARY_DIR}/libs/regex/src/w32_regex_traits.cpp" + "${LIBRARY_DIR}/libs/regex/src/wc_regex_traits.cpp" + "${LIBRARY_DIR}/libs/regex/src/wide_posix_api.cpp" + "${LIBRARY_DIR}/libs/regex/src/winstances.cpp" ) add_library (_boost_regex ${SRCS_REGEX}) @@ -149,7 +149,7 @@ if (NOT EXTERNAL_BOOST_FOUND) # system set (SRCS_SYSTEM - ${LIBRARY_DIR}/libs/system/src/error_code.cpp + 
"${LIBRARY_DIR}/libs/system/src/error_code.cpp" ) add_library (_boost_system ${SRCS_SYSTEM}) @@ -160,6 +160,12 @@ if (NOT EXTERNAL_BOOST_FOUND) enable_language(ASM) SET(ASM_OPTIONS "-x assembler-with-cpp") + set (SRCS_CONTEXT + "${LIBRARY_DIR}/libs/context/src/dummy.cpp" + "${LIBRARY_DIR}/libs/context/src/execution_context.cpp" + "${LIBRARY_DIR}/libs/context/src/posix/stack_traits.cpp" + ) + if (SANITIZE AND (SANITIZE STREQUAL "address" OR SANITIZE STREQUAL "thread")) add_compile_definitions(BOOST_USE_UCONTEXT) @@ -169,39 +175,34 @@ if (NOT EXTERNAL_BOOST_FOUND) add_compile_definitions(BOOST_USE_TSAN) endif() - set (SRCS_CONTEXT - ${LIBRARY_DIR}/libs/context/src/fiber.cpp - ${LIBRARY_DIR}/libs/context/src/continuation.cpp - ${LIBRARY_DIR}/libs/context/src/dummy.cpp - ${LIBRARY_DIR}/libs/context/src/execution_context.cpp - ${LIBRARY_DIR}/libs/context/src/posix/stack_traits.cpp + set (SRCS_CONTEXT ${SRCS_CONTEXT} + "${LIBRARY_DIR}/libs/context/src/fiber.cpp" + "${LIBRARY_DIR}/libs/context/src/continuation.cpp" ) - elseif (ARCH_ARM) - set (SRCS_CONTEXT - ${LIBRARY_DIR}/libs/context/src/asm/jump_arm64_aapcs_elf_gas.S - ${LIBRARY_DIR}/libs/context/src/asm/make_arm64_aapcs_elf_gas.S - ${LIBRARY_DIR}/libs/context/src/asm/ontop_arm64_aapcs_elf_gas.S - ${LIBRARY_DIR}/libs/context/src/dummy.cpp - ${LIBRARY_DIR}/libs/context/src/execution_context.cpp - ${LIBRARY_DIR}/libs/context/src/posix/stack_traits.cpp + endif() + if (ARCH_ARM) + set (SRCS_CONTEXT ${SRCS_CONTEXT} + "${LIBRARY_DIR}/libs/context/src/asm/jump_arm64_aapcs_elf_gas.S" + "${LIBRARY_DIR}/libs/context/src/asm/make_arm64_aapcs_elf_gas.S" + "${LIBRARY_DIR}/libs/context/src/asm/ontop_arm64_aapcs_elf_gas.S" + ) + elseif (ARCH_PPC64LE) + set (SRCS_CONTEXT ${SRCS_CONTEXT} + "${LIBRARY_DIR}/libs/context/src/asm/jump_ppc64_sysv_elf_gas.S" + "${LIBRARY_DIR}/libs/context/src/asm/make_ppc64_sysv_elf_gas.S" + "${LIBRARY_DIR}/libs/context/src/asm/ontop_ppc64_sysv_elf_gas.S" ) elseif(OS_DARWIN) - set (SRCS_CONTEXT - ${LIBRARY_DIR}/libs/context/src/asm/jump_x86_64_sysv_macho_gas.S - ${LIBRARY_DIR}/libs/context/src/asm/make_x86_64_sysv_macho_gas.S - ${LIBRARY_DIR}/libs/context/src/asm/ontop_x86_64_sysv_macho_gas.S - ${LIBRARY_DIR}/libs/context/src/dummy.cpp - ${LIBRARY_DIR}/libs/context/src/execution_context.cpp - ${LIBRARY_DIR}/libs/context/src/posix/stack_traits.cpp + set (SRCS_CONTEXT ${SRCS_CONTEXT} + "${LIBRARY_DIR}/libs/context/src/asm/jump_x86_64_sysv_macho_gas.S" + "${LIBRARY_DIR}/libs/context/src/asm/make_x86_64_sysv_macho_gas.S" + "${LIBRARY_DIR}/libs/context/src/asm/ontop_x86_64_sysv_macho_gas.S" ) else() - set (SRCS_CONTEXT - ${LIBRARY_DIR}/libs/context/src/asm/jump_x86_64_sysv_elf_gas.S - ${LIBRARY_DIR}/libs/context/src/asm/make_x86_64_sysv_elf_gas.S - ${LIBRARY_DIR}/libs/context/src/asm/ontop_x86_64_sysv_elf_gas.S - ${LIBRARY_DIR}/libs/context/src/dummy.cpp - ${LIBRARY_DIR}/libs/context/src/execution_context.cpp - ${LIBRARY_DIR}/libs/context/src/posix/stack_traits.cpp + set (SRCS_CONTEXT ${SRCS_CONTEXT} + "${LIBRARY_DIR}/libs/context/src/asm/jump_x86_64_sysv_elf_gas.S" + "${LIBRARY_DIR}/libs/context/src/asm/make_x86_64_sysv_elf_gas.S" + "${LIBRARY_DIR}/libs/context/src/asm/ontop_x86_64_sysv_elf_gas.S" ) endif() @@ -212,9 +213,9 @@ if (NOT EXTERNAL_BOOST_FOUND) # coroutine set (SRCS_COROUTINE - ${LIBRARY_DIR}/libs/coroutine/detail/coroutine_context.cpp - ${LIBRARY_DIR}/libs/coroutine/exceptions.cpp - ${LIBRARY_DIR}/libs/coroutine/posix/stack_traits.cpp + "${LIBRARY_DIR}/libs/coroutine/detail/coroutine_context.cpp" + 
"${LIBRARY_DIR}/libs/coroutine/exceptions.cpp" + "${LIBRARY_DIR}/libs/coroutine/posix/stack_traits.cpp" ) add_library (_boost_coroutine ${SRCS_COROUTINE}) add_library (boost::coroutine ALIAS _boost_coroutine) diff --git a/contrib/boringssl b/contrib/boringssl index fd9ce1a0406..a6a2e2ab3e4 160000 --- a/contrib/boringssl +++ b/contrib/boringssl @@ -1 +1 @@ -Subproject commit fd9ce1a0406f571507068b9555d0b545b8a18332 +Subproject commit a6a2e2ab3e44d97ce98e51c558e989f211de7eb3 diff --git a/contrib/boringssl-cmake/CMakeLists.txt b/contrib/boringssl-cmake/CMakeLists.txt index 017a8a64c0e..9d8c6ca6083 100644 --- a/contrib/boringssl-cmake/CMakeLists.txt +++ b/contrib/boringssl-cmake/CMakeLists.txt @@ -8,7 +8,7 @@ cmake_minimum_required(VERSION 3.0) project(BoringSSL LANGUAGES C CXX) -set(BORINGSSL_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/boringssl) +set(BORINGSSL_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/boringssl") if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") set(CLANG 1) @@ -16,7 +16,7 @@ endif() if(CMAKE_COMPILER_IS_GNUCXX OR CLANG) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -fvisibility=hidden -fno-common -fno-exceptions -fno-rtti") - if(APPLE) + if(APPLE AND CLANG) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") endif() @@ -130,7 +130,7 @@ if(BUILD_SHARED_LIBS) set(CMAKE_POSITION_INDEPENDENT_CODE TRUE) endif() -include_directories(${BORINGSSL_SOURCE_DIR}/include) +include_directories("${BORINGSSL_SOURCE_DIR}/include") set( CRYPTO_ios_aarch64_SOURCES @@ -192,8 +192,8 @@ set( linux-arm/crypto/fipsmodule/sha512-armv4.S linux-arm/crypto/fipsmodule/vpaes-armv7.S linux-arm/crypto/test/trampoline-armv4.S - ${BORINGSSL_SOURCE_DIR}/crypto/curve25519/asm/x25519-asm-arm.S - ${BORINGSSL_SOURCE_DIR}/crypto/poly1305/poly1305_arm_asm.S + "${BORINGSSL_SOURCE_DIR}/crypto/curve25519/asm/x25519-asm-arm.S" + "${BORINGSSL_SOURCE_DIR}/crypto/poly1305/poly1305_arm_asm.S" ) set( @@ -244,7 +244,7 @@ set( linux-x86_64/crypto/fipsmodule/x86_64-mont.S linux-x86_64/crypto/fipsmodule/x86_64-mont5.S linux-x86_64/crypto/test/trampoline-x86_64.S - ${BORINGSSL_SOURCE_DIR}/crypto/hrss/asm/poly_rq_mul.S + "${BORINGSSL_SOURCE_DIR}/crypto/hrss/asm/poly_rq_mul.S" ) set( @@ -348,300 +348,300 @@ add_library( ${CRYPTO_ARCH_SOURCES} err_data.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_bitstr.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_bool.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_d2i_fp.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_dup.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_enum.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_gentm.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_i2d_fp.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_int.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_mbstr.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_object.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_octet.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_print.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_strnid.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_time.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_type.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_utctm.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_utf8.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/asn1_lib.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/asn1_par.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/asn_pack.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/f_enum.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/f_int.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/f_string.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_dec.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_enc.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_fre.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_new.c 
- ${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_typ.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_utl.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/time_support.c - ${BORINGSSL_SOURCE_DIR}/crypto/base64/base64.c - ${BORINGSSL_SOURCE_DIR}/crypto/bio/bio.c - ${BORINGSSL_SOURCE_DIR}/crypto/bio/bio_mem.c - ${BORINGSSL_SOURCE_DIR}/crypto/bio/connect.c - ${BORINGSSL_SOURCE_DIR}/crypto/bio/fd.c - ${BORINGSSL_SOURCE_DIR}/crypto/bio/file.c - ${BORINGSSL_SOURCE_DIR}/crypto/bio/hexdump.c - ${BORINGSSL_SOURCE_DIR}/crypto/bio/pair.c - ${BORINGSSL_SOURCE_DIR}/crypto/bio/printf.c - ${BORINGSSL_SOURCE_DIR}/crypto/bio/socket.c - ${BORINGSSL_SOURCE_DIR}/crypto/bio/socket_helper.c - ${BORINGSSL_SOURCE_DIR}/crypto/bn_extra/bn_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/bn_extra/convert.c - ${BORINGSSL_SOURCE_DIR}/crypto/buf/buf.c - ${BORINGSSL_SOURCE_DIR}/crypto/bytestring/asn1_compat.c - ${BORINGSSL_SOURCE_DIR}/crypto/bytestring/ber.c - ${BORINGSSL_SOURCE_DIR}/crypto/bytestring/cbb.c - ${BORINGSSL_SOURCE_DIR}/crypto/bytestring/cbs.c - ${BORINGSSL_SOURCE_DIR}/crypto/bytestring/unicode.c - ${BORINGSSL_SOURCE_DIR}/crypto/chacha/chacha.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/cipher_extra.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/derive_key.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_aesccm.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_aesctrhmac.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_aesgcmsiv.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_chacha20poly1305.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_null.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_rc2.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_rc4.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_tls.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/tls_cbc.c - ${BORINGSSL_SOURCE_DIR}/crypto/cmac/cmac.c - ${BORINGSSL_SOURCE_DIR}/crypto/conf/conf.c - ${BORINGSSL_SOURCE_DIR}/crypto/cpu-aarch64-fuchsia.c - ${BORINGSSL_SOURCE_DIR}/crypto/cpu-aarch64-linux.c - ${BORINGSSL_SOURCE_DIR}/crypto/cpu-arm-linux.c - ${BORINGSSL_SOURCE_DIR}/crypto/cpu-arm.c - ${BORINGSSL_SOURCE_DIR}/crypto/cpu-intel.c - ${BORINGSSL_SOURCE_DIR}/crypto/cpu-ppc64le.c - ${BORINGSSL_SOURCE_DIR}/crypto/crypto.c - ${BORINGSSL_SOURCE_DIR}/crypto/curve25519/curve25519.c - ${BORINGSSL_SOURCE_DIR}/crypto/curve25519/spake25519.c - ${BORINGSSL_SOURCE_DIR}/crypto/dh_extra/dh_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/dh_extra/params.c - ${BORINGSSL_SOURCE_DIR}/crypto/digest_extra/digest_extra.c - ${BORINGSSL_SOURCE_DIR}/crypto/dsa/dsa.c - ${BORINGSSL_SOURCE_DIR}/crypto/dsa/dsa_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/ec_extra/ec_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/ec_extra/ec_derive.c - ${BORINGSSL_SOURCE_DIR}/crypto/ec_extra/hash_to_curve.c - ${BORINGSSL_SOURCE_DIR}/crypto/ecdh_extra/ecdh_extra.c - ${BORINGSSL_SOURCE_DIR}/crypto/ecdsa_extra/ecdsa_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/engine/engine.c - ${BORINGSSL_SOURCE_DIR}/crypto/err/err.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/digestsign.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/evp.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/evp_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/evp_ctx.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/p_dsa_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/p_ec.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/p_ec_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/p_ed25519.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/p_ed25519_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/p_rsa.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/p_rsa_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/p_x25519.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/p_x25519_asn1.c - 
${BORINGSSL_SOURCE_DIR}/crypto/evp/pbkdf.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/print.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/scrypt.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/sign.c - ${BORINGSSL_SOURCE_DIR}/crypto/ex_data.c - ${BORINGSSL_SOURCE_DIR}/crypto/fipsmodule/bcm.c - ${BORINGSSL_SOURCE_DIR}/crypto/fipsmodule/fips_shared_support.c - ${BORINGSSL_SOURCE_DIR}/crypto/fipsmodule/is_fips.c - ${BORINGSSL_SOURCE_DIR}/crypto/hkdf/hkdf.c - ${BORINGSSL_SOURCE_DIR}/crypto/hpke/hpke.c - ${BORINGSSL_SOURCE_DIR}/crypto/hrss/hrss.c - ${BORINGSSL_SOURCE_DIR}/crypto/lhash/lhash.c - ${BORINGSSL_SOURCE_DIR}/crypto/mem.c - ${BORINGSSL_SOURCE_DIR}/crypto/obj/obj.c - ${BORINGSSL_SOURCE_DIR}/crypto/obj/obj_xref.c - ${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_all.c - ${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_info.c - ${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_lib.c - ${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_oth.c - ${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_pk8.c - ${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_pkey.c - ${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_x509.c - ${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_xaux.c - ${BORINGSSL_SOURCE_DIR}/crypto/pkcs7/pkcs7.c - ${BORINGSSL_SOURCE_DIR}/crypto/pkcs7/pkcs7_x509.c - ${BORINGSSL_SOURCE_DIR}/crypto/pkcs8/p5_pbev2.c - ${BORINGSSL_SOURCE_DIR}/crypto/pkcs8/pkcs8.c - ${BORINGSSL_SOURCE_DIR}/crypto/pkcs8/pkcs8_x509.c - ${BORINGSSL_SOURCE_DIR}/crypto/poly1305/poly1305.c - ${BORINGSSL_SOURCE_DIR}/crypto/poly1305/poly1305_arm.c - ${BORINGSSL_SOURCE_DIR}/crypto/poly1305/poly1305_vec.c - ${BORINGSSL_SOURCE_DIR}/crypto/pool/pool.c - ${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/deterministic.c - ${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/forkunsafe.c - ${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/fuchsia.c - ${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/passive.c - ${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/rand_extra.c - ${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/windows.c - ${BORINGSSL_SOURCE_DIR}/crypto/rc4/rc4.c - ${BORINGSSL_SOURCE_DIR}/crypto/refcount_c11.c - ${BORINGSSL_SOURCE_DIR}/crypto/refcount_lock.c - ${BORINGSSL_SOURCE_DIR}/crypto/rsa_extra/rsa_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/rsa_extra/rsa_print.c - ${BORINGSSL_SOURCE_DIR}/crypto/siphash/siphash.c - ${BORINGSSL_SOURCE_DIR}/crypto/stack/stack.c - ${BORINGSSL_SOURCE_DIR}/crypto/thread.c - ${BORINGSSL_SOURCE_DIR}/crypto/thread_none.c - ${BORINGSSL_SOURCE_DIR}/crypto/thread_pthread.c - ${BORINGSSL_SOURCE_DIR}/crypto/thread_win.c - ${BORINGSSL_SOURCE_DIR}/crypto/trust_token/pmbtoken.c - ${BORINGSSL_SOURCE_DIR}/crypto/trust_token/trust_token.c - ${BORINGSSL_SOURCE_DIR}/crypto/trust_token/voprf.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/a_digest.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/a_sign.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/a_strex.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/a_verify.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/algorithm.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/asn1_gen.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/by_dir.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/by_file.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/i2d_pr.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/rsa_pss.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/t_crl.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/t_req.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/t_x509.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/t_x509a.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_att.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_cmp.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_d2.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_def.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_ext.c - 
${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_lu.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_obj.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_r2x.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_req.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_set.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_trs.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_txt.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_v3.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_vfy.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_vpm.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509cset.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509name.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509rset.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509spki.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_algor.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_all.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_attrib.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_crl.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_exten.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_info.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_name.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_pkey.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_pubkey.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_req.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_sig.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_spki.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_val.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_x509.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_x509a.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_cache.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_data.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_lib.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_map.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_node.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_tree.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_akey.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_akeya.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_alt.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_bcons.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_bitst.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_conf.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_cpols.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_crld.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_enum.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_extku.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_genn.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_ia5.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_info.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_int.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_lib.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_ncons.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_ocsp.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_pci.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_pcia.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_pcons.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_pmaps.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_prn.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_purp.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_skey.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_utl.c + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_bitstr.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_bool.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_d2i_fp.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_dup.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_enum.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_gentm.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_i2d_fp.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_int.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_mbstr.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_object.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_octet.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_print.c" + 
"${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_strnid.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_time.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_type.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_utctm.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_utf8.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/asn1_lib.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/asn1_par.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/asn_pack.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/f_enum.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/f_int.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/f_string.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_dec.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_enc.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_fre.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_new.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_typ.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_utl.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/time_support.c" + "${BORINGSSL_SOURCE_DIR}/crypto/base64/base64.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bio/bio.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bio/bio_mem.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bio/connect.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bio/fd.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bio/file.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bio/hexdump.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bio/pair.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bio/printf.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bio/socket.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bio/socket_helper.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bn_extra/bn_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bn_extra/convert.c" + "${BORINGSSL_SOURCE_DIR}/crypto/buf/buf.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bytestring/asn1_compat.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bytestring/ber.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bytestring/cbb.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bytestring/cbs.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bytestring/unicode.c" + "${BORINGSSL_SOURCE_DIR}/crypto/chacha/chacha.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/cipher_extra.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/derive_key.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_aesccm.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_aesctrhmac.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_aesgcmsiv.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_chacha20poly1305.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_null.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_rc2.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_rc4.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_tls.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/tls_cbc.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cmac/cmac.c" + "${BORINGSSL_SOURCE_DIR}/crypto/conf/conf.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cpu-aarch64-fuchsia.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cpu-aarch64-linux.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cpu-arm-linux.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cpu-arm.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cpu-intel.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cpu-ppc64le.c" + "${BORINGSSL_SOURCE_DIR}/crypto/crypto.c" + "${BORINGSSL_SOURCE_DIR}/crypto/curve25519/curve25519.c" + "${BORINGSSL_SOURCE_DIR}/crypto/curve25519/spake25519.c" + "${BORINGSSL_SOURCE_DIR}/crypto/dh_extra/dh_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/dh_extra/params.c" + "${BORINGSSL_SOURCE_DIR}/crypto/digest_extra/digest_extra.c" + "${BORINGSSL_SOURCE_DIR}/crypto/dsa/dsa.c" + "${BORINGSSL_SOURCE_DIR}/crypto/dsa/dsa_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/ec_extra/ec_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/ec_extra/ec_derive.c" + 
"${BORINGSSL_SOURCE_DIR}/crypto/ec_extra/hash_to_curve.c" + "${BORINGSSL_SOURCE_DIR}/crypto/ecdh_extra/ecdh_extra.c" + "${BORINGSSL_SOURCE_DIR}/crypto/ecdsa_extra/ecdsa_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/engine/engine.c" + "${BORINGSSL_SOURCE_DIR}/crypto/err/err.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/digestsign.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/evp.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/evp_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/evp_ctx.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/p_dsa_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/p_ec.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/p_ec_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/p_ed25519.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/p_ed25519_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/p_rsa.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/p_rsa_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/p_x25519.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/p_x25519_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/pbkdf.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/print.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/scrypt.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/sign.c" + "${BORINGSSL_SOURCE_DIR}/crypto/ex_data.c" + "${BORINGSSL_SOURCE_DIR}/crypto/fipsmodule/bcm.c" + "${BORINGSSL_SOURCE_DIR}/crypto/fipsmodule/fips_shared_support.c" + "${BORINGSSL_SOURCE_DIR}/crypto/fipsmodule/is_fips.c" + "${BORINGSSL_SOURCE_DIR}/crypto/hkdf/hkdf.c" + "${BORINGSSL_SOURCE_DIR}/crypto/hpke/hpke.c" + "${BORINGSSL_SOURCE_DIR}/crypto/hrss/hrss.c" + "${BORINGSSL_SOURCE_DIR}/crypto/lhash/lhash.c" + "${BORINGSSL_SOURCE_DIR}/crypto/mem.c" + "${BORINGSSL_SOURCE_DIR}/crypto/obj/obj.c" + "${BORINGSSL_SOURCE_DIR}/crypto/obj/obj_xref.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_all.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_info.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_lib.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_oth.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_pk8.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_pkey.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_x509.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_xaux.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pkcs7/pkcs7.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pkcs7/pkcs7_x509.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pkcs8/p5_pbev2.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pkcs8/pkcs8.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pkcs8/pkcs8_x509.c" + "${BORINGSSL_SOURCE_DIR}/crypto/poly1305/poly1305.c" + "${BORINGSSL_SOURCE_DIR}/crypto/poly1305/poly1305_arm.c" + "${BORINGSSL_SOURCE_DIR}/crypto/poly1305/poly1305_vec.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pool/pool.c" + "${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/deterministic.c" + "${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/forkunsafe.c" + "${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/fuchsia.c" + "${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/passive.c" + "${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/rand_extra.c" + "${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/windows.c" + "${BORINGSSL_SOURCE_DIR}/crypto/rc4/rc4.c" + "${BORINGSSL_SOURCE_DIR}/crypto/refcount_c11.c" + "${BORINGSSL_SOURCE_DIR}/crypto/refcount_lock.c" + "${BORINGSSL_SOURCE_DIR}/crypto/rsa_extra/rsa_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/rsa_extra/rsa_print.c" + "${BORINGSSL_SOURCE_DIR}/crypto/siphash/siphash.c" + "${BORINGSSL_SOURCE_DIR}/crypto/stack/stack.c" + "${BORINGSSL_SOURCE_DIR}/crypto/thread.c" + "${BORINGSSL_SOURCE_DIR}/crypto/thread_none.c" + "${BORINGSSL_SOURCE_DIR}/crypto/thread_pthread.c" + "${BORINGSSL_SOURCE_DIR}/crypto/thread_win.c" + "${BORINGSSL_SOURCE_DIR}/crypto/trust_token/pmbtoken.c" + 
"${BORINGSSL_SOURCE_DIR}/crypto/trust_token/trust_token.c" + "${BORINGSSL_SOURCE_DIR}/crypto/trust_token/voprf.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/a_digest.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/a_sign.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/a_strex.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/a_verify.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/algorithm.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/asn1_gen.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/by_dir.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/by_file.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/i2d_pr.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/rsa_pss.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/t_crl.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/t_req.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/t_x509.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/t_x509a.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_att.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_cmp.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_d2.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_def.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_ext.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_lu.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_obj.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_r2x.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_req.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_set.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_trs.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_txt.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_v3.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_vfy.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_vpm.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509cset.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509name.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509rset.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509spki.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_algor.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_all.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_attrib.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_crl.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_exten.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_info.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_name.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_pkey.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_pubkey.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_req.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_sig.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_spki.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_val.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_x509.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_x509a.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_cache.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_data.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_lib.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_map.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_node.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_tree.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_akey.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_akeya.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_alt.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_bcons.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_bitst.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_conf.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_cpols.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_crld.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_enum.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_extku.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_genn.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_ia5.c" + 
"${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_info.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_int.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_lib.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_ncons.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_ocsp.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_pci.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_pcia.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_pcons.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_pmaps.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_prn.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_purp.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_skey.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_utl.c" ) add_library( ssl - ${BORINGSSL_SOURCE_DIR}/ssl/bio_ssl.cc - ${BORINGSSL_SOURCE_DIR}/ssl/d1_both.cc - ${BORINGSSL_SOURCE_DIR}/ssl/d1_lib.cc - ${BORINGSSL_SOURCE_DIR}/ssl/d1_pkt.cc - ${BORINGSSL_SOURCE_DIR}/ssl/d1_srtp.cc - ${BORINGSSL_SOURCE_DIR}/ssl/dtls_method.cc - ${BORINGSSL_SOURCE_DIR}/ssl/dtls_record.cc - ${BORINGSSL_SOURCE_DIR}/ssl/handoff.cc - ${BORINGSSL_SOURCE_DIR}/ssl/handshake.cc - ${BORINGSSL_SOURCE_DIR}/ssl/handshake_client.cc - ${BORINGSSL_SOURCE_DIR}/ssl/handshake_server.cc - ${BORINGSSL_SOURCE_DIR}/ssl/s3_both.cc - ${BORINGSSL_SOURCE_DIR}/ssl/s3_lib.cc - ${BORINGSSL_SOURCE_DIR}/ssl/s3_pkt.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_aead_ctx.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_asn1.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_buffer.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_cert.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_cipher.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_file.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_key_share.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_lib.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_privkey.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_session.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_stat.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_transcript.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_versions.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_x509.cc - ${BORINGSSL_SOURCE_DIR}/ssl/t1_enc.cc - ${BORINGSSL_SOURCE_DIR}/ssl/t1_lib.cc - ${BORINGSSL_SOURCE_DIR}/ssl/tls13_both.cc - ${BORINGSSL_SOURCE_DIR}/ssl/tls13_client.cc - ${BORINGSSL_SOURCE_DIR}/ssl/tls13_enc.cc - ${BORINGSSL_SOURCE_DIR}/ssl/tls13_server.cc - ${BORINGSSL_SOURCE_DIR}/ssl/tls_method.cc - ${BORINGSSL_SOURCE_DIR}/ssl/tls_record.cc + "${BORINGSSL_SOURCE_DIR}/ssl/bio_ssl.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/d1_both.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/d1_lib.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/d1_pkt.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/d1_srtp.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/dtls_method.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/dtls_record.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/handoff.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/handshake.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/handshake_client.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/handshake_server.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/s3_both.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/s3_lib.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/s3_pkt.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_aead_ctx.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_asn1.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_buffer.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_cert.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_cipher.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_file.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_key_share.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_lib.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_privkey.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_session.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_stat.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_transcript.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_versions.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_x509.cc" + 
"${BORINGSSL_SOURCE_DIR}/ssl/t1_enc.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/t1_lib.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/tls13_both.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/tls13_client.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/tls13_enc.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/tls13_server.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/tls_method.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/tls_record.cc" - ${BORINGSSL_SOURCE_DIR}/decrepit/ssl/ssl_decrepit.c - ${BORINGSSL_SOURCE_DIR}/decrepit/cfb/cfb.c + "${BORINGSSL_SOURCE_DIR}/decrepit/ssl/ssl_decrepit.c" + "${BORINGSSL_SOURCE_DIR}/decrepit/cfb/cfb.c" ) add_executable( bssl - ${BORINGSSL_SOURCE_DIR}/tool/args.cc - ${BORINGSSL_SOURCE_DIR}/tool/ciphers.cc - ${BORINGSSL_SOURCE_DIR}/tool/client.cc - ${BORINGSSL_SOURCE_DIR}/tool/const.cc - ${BORINGSSL_SOURCE_DIR}/tool/digest.cc - ${BORINGSSL_SOURCE_DIR}/tool/fd.cc - ${BORINGSSL_SOURCE_DIR}/tool/file.cc - ${BORINGSSL_SOURCE_DIR}/tool/generate_ed25519.cc - ${BORINGSSL_SOURCE_DIR}/tool/genrsa.cc - ${BORINGSSL_SOURCE_DIR}/tool/pkcs12.cc - ${BORINGSSL_SOURCE_DIR}/tool/rand.cc - ${BORINGSSL_SOURCE_DIR}/tool/server.cc - ${BORINGSSL_SOURCE_DIR}/tool/sign.cc - ${BORINGSSL_SOURCE_DIR}/tool/speed.cc - ${BORINGSSL_SOURCE_DIR}/tool/tool.cc - ${BORINGSSL_SOURCE_DIR}/tool/transport_common.cc + "${BORINGSSL_SOURCE_DIR}/tool/args.cc" + "${BORINGSSL_SOURCE_DIR}/tool/ciphers.cc" + "${BORINGSSL_SOURCE_DIR}/tool/client.cc" + "${BORINGSSL_SOURCE_DIR}/tool/const.cc" + "${BORINGSSL_SOURCE_DIR}/tool/digest.cc" + "${BORINGSSL_SOURCE_DIR}/tool/fd.cc" + "${BORINGSSL_SOURCE_DIR}/tool/file.cc" + "${BORINGSSL_SOURCE_DIR}/tool/generate_ed25519.cc" + "${BORINGSSL_SOURCE_DIR}/tool/genrsa.cc" + "${BORINGSSL_SOURCE_DIR}/tool/pkcs12.cc" + "${BORINGSSL_SOURCE_DIR}/tool/rand.cc" + "${BORINGSSL_SOURCE_DIR}/tool/server.cc" + "${BORINGSSL_SOURCE_DIR}/tool/sign.cc" + "${BORINGSSL_SOURCE_DIR}/tool/speed.cc" + "${BORINGSSL_SOURCE_DIR}/tool/tool.cc" + "${BORINGSSL_SOURCE_DIR}/tool/transport_common.cc" ) target_link_libraries(ssl crypto) @@ -655,7 +655,7 @@ if(WIN32) target_link_libraries(bssl ws2_32) endif() -target_include_directories(crypto SYSTEM PUBLIC ${BORINGSSL_SOURCE_DIR}/include) -target_include_directories(ssl SYSTEM PUBLIC ${BORINGSSL_SOURCE_DIR}/include) +target_include_directories(crypto SYSTEM PUBLIC "${BORINGSSL_SOURCE_DIR}/include") +target_include_directories(ssl SYSTEM PUBLIC "${BORINGSSL_SOURCE_DIR}/include") target_compile_options(crypto PRIVATE -Wno-gnu-anonymous-struct) diff --git a/contrib/brotli-cmake/CMakeLists.txt b/contrib/brotli-cmake/CMakeLists.txt index 4c5f584de9d..7293cae0665 100644 --- a/contrib/brotli-cmake/CMakeLists.txt +++ b/contrib/brotli-cmake/CMakeLists.txt @@ -1,41 +1,41 @@ -set(BROTLI_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/brotli/c) -set(BROTLI_BINARY_DIR ${ClickHouse_BINARY_DIR}/contrib/brotli/c) +set(BROTLI_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/brotli/c") +set(BROTLI_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/brotli/c") set(SRCS - ${BROTLI_SOURCE_DIR}/enc/command.c - ${BROTLI_SOURCE_DIR}/enc/fast_log.c - ${BROTLI_SOURCE_DIR}/dec/bit_reader.c - ${BROTLI_SOURCE_DIR}/dec/state.c - ${BROTLI_SOURCE_DIR}/dec/huffman.c - ${BROTLI_SOURCE_DIR}/dec/decode.c - ${BROTLI_SOURCE_DIR}/enc/encode.c - ${BROTLI_SOURCE_DIR}/enc/dictionary_hash.c - ${BROTLI_SOURCE_DIR}/enc/cluster.c - ${BROTLI_SOURCE_DIR}/enc/entropy_encode.c - ${BROTLI_SOURCE_DIR}/enc/literal_cost.c - ${BROTLI_SOURCE_DIR}/enc/compress_fragment_two_pass.c - ${BROTLI_SOURCE_DIR}/enc/static_dict.c - ${BROTLI_SOURCE_DIR}/enc/compress_fragment.c - 
${BROTLI_SOURCE_DIR}/enc/block_splitter.c - ${BROTLI_SOURCE_DIR}/enc/backward_references_hq.c - ${BROTLI_SOURCE_DIR}/enc/histogram.c - ${BROTLI_SOURCE_DIR}/enc/brotli_bit_stream.c - ${BROTLI_SOURCE_DIR}/enc/utf8_util.c - ${BROTLI_SOURCE_DIR}/enc/encoder_dict.c - ${BROTLI_SOURCE_DIR}/enc/backward_references.c - ${BROTLI_SOURCE_DIR}/enc/bit_cost.c - ${BROTLI_SOURCE_DIR}/enc/metablock.c - ${BROTLI_SOURCE_DIR}/enc/memory.c - ${BROTLI_SOURCE_DIR}/common/dictionary.c - ${BROTLI_SOURCE_DIR}/common/transform.c - ${BROTLI_SOURCE_DIR}/common/platform.c - ${BROTLI_SOURCE_DIR}/common/context.c - ${BROTLI_SOURCE_DIR}/common/constants.c + "${BROTLI_SOURCE_DIR}/enc/command.c" + "${BROTLI_SOURCE_DIR}/enc/fast_log.c" + "${BROTLI_SOURCE_DIR}/dec/bit_reader.c" + "${BROTLI_SOURCE_DIR}/dec/state.c" + "${BROTLI_SOURCE_DIR}/dec/huffman.c" + "${BROTLI_SOURCE_DIR}/dec/decode.c" + "${BROTLI_SOURCE_DIR}/enc/encode.c" + "${BROTLI_SOURCE_DIR}/enc/dictionary_hash.c" + "${BROTLI_SOURCE_DIR}/enc/cluster.c" + "${BROTLI_SOURCE_DIR}/enc/entropy_encode.c" + "${BROTLI_SOURCE_DIR}/enc/literal_cost.c" + "${BROTLI_SOURCE_DIR}/enc/compress_fragment_two_pass.c" + "${BROTLI_SOURCE_DIR}/enc/static_dict.c" + "${BROTLI_SOURCE_DIR}/enc/compress_fragment.c" + "${BROTLI_SOURCE_DIR}/enc/block_splitter.c" + "${BROTLI_SOURCE_DIR}/enc/backward_references_hq.c" + "${BROTLI_SOURCE_DIR}/enc/histogram.c" + "${BROTLI_SOURCE_DIR}/enc/brotli_bit_stream.c" + "${BROTLI_SOURCE_DIR}/enc/utf8_util.c" + "${BROTLI_SOURCE_DIR}/enc/encoder_dict.c" + "${BROTLI_SOURCE_DIR}/enc/backward_references.c" + "${BROTLI_SOURCE_DIR}/enc/bit_cost.c" + "${BROTLI_SOURCE_DIR}/enc/metablock.c" + "${BROTLI_SOURCE_DIR}/enc/memory.c" + "${BROTLI_SOURCE_DIR}/common/dictionary.c" + "${BROTLI_SOURCE_DIR}/common/transform.c" + "${BROTLI_SOURCE_DIR}/common/platform.c" + "${BROTLI_SOURCE_DIR}/common/context.c" + "${BROTLI_SOURCE_DIR}/common/constants.c" ) add_library(brotli ${SRCS}) -target_include_directories(brotli PUBLIC ${BROTLI_SOURCE_DIR}/include) +target_include_directories(brotli PUBLIC "${BROTLI_SOURCE_DIR}/include") if(M_LIBRARY) target_link_libraries(brotli PRIVATE ${M_LIBRARY}) diff --git a/contrib/capnproto-cmake/CMakeLists.txt b/contrib/capnproto-cmake/CMakeLists.txt index 949481e7ef5..9f6e076cc7d 100644 --- a/contrib/capnproto-cmake/CMakeLists.txt +++ b/contrib/capnproto-cmake/CMakeLists.txt @@ -1,53 +1,53 @@ -set (CAPNPROTO_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/capnproto/c++/src) +set (CAPNPROTO_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/capnproto/c++/src") set (CMAKE_CXX_STANDARD 17) set (KJ_SRCS - ${CAPNPROTO_SOURCE_DIR}/kj/array.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/common.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/debug.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/exception.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/io.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/memory.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/mutex.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/string.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/hash.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/table.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/thread.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/main.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/arena.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/test-helpers.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/units.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/encoding.c++ + "${CAPNPROTO_SOURCE_DIR}/kj/array.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/common.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/debug.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/exception.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/io.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/memory.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/mutex.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/string.c++" + 
"${CAPNPROTO_SOURCE_DIR}/kj/hash.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/table.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/thread.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/main.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/arena.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/test-helpers.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/units.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/encoding.c++" - ${CAPNPROTO_SOURCE_DIR}/kj/refcount.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/string-tree.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/time.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/filesystem.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/filesystem-disk-unix.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/filesystem-disk-win32.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/parse/char.c++ + "${CAPNPROTO_SOURCE_DIR}/kj/refcount.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/string-tree.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/time.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/filesystem.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/filesystem-disk-unix.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/filesystem-disk-win32.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/parse/char.c++" ) add_library(kj ${KJ_SRCS}) target_include_directories(kj SYSTEM PUBLIC ${CAPNPROTO_SOURCE_DIR}) set (CAPNP_SRCS - ${CAPNPROTO_SOURCE_DIR}/capnp/c++.capnp.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/blob.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/arena.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/layout.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/list.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/any.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/message.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/schema.capnp.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/serialize.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/serialize-packed.c++ + "${CAPNPROTO_SOURCE_DIR}/capnp/c++.capnp.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/blob.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/arena.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/layout.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/list.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/any.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/message.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/schema.capnp.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/serialize.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/serialize-packed.c++" - ${CAPNPROTO_SOURCE_DIR}/capnp/schema.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/schema-loader.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/dynamic.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/stringify.c++ + "${CAPNPROTO_SOURCE_DIR}/capnp/schema.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/schema-loader.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/dynamic.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/stringify.c++" ) add_library(capnp ${CAPNP_SRCS}) @@ -57,16 +57,16 @@ set_target_properties(capnp target_link_libraries(capnp PUBLIC kj) set (CAPNPC_SRCS - ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/type-id.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/error-reporter.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/lexer.capnp.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/lexer.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/grammar.capnp.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/parser.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/node-translator.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/compiler.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/schema-parser.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/serialize-text.c++ + "${CAPNPROTO_SOURCE_DIR}/capnp/compiler/type-id.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/compiler/error-reporter.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/compiler/lexer.capnp.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/compiler/lexer.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/compiler/grammar.capnp.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/compiler/parser.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/compiler/node-translator.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/compiler/compiler.c++" + 
"${CAPNPROTO_SOURCE_DIR}/capnp/schema-parser.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/serialize-text.c++" ) add_library(capnpc ${CAPNPC_SRCS}) diff --git a/contrib/cctz-cmake/CMakeLists.txt b/contrib/cctz-cmake/CMakeLists.txt index 90e33dc9f62..93413693796 100644 --- a/contrib/cctz-cmake/CMakeLists.txt +++ b/contrib/cctz-cmake/CMakeLists.txt @@ -40,23 +40,23 @@ endif() if (NOT EXTERNAL_CCTZ_LIBRARY_FOUND OR NOT EXTERNAL_CCTZ_LIBRARY_WORKS) set(USE_INTERNAL_CCTZ_LIBRARY 1) - set(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/cctz) + set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/cctz") set (SRCS - ${LIBRARY_DIR}/src/civil_time_detail.cc - ${LIBRARY_DIR}/src/time_zone_fixed.cc - ${LIBRARY_DIR}/src/time_zone_format.cc - ${LIBRARY_DIR}/src/time_zone_if.cc - ${LIBRARY_DIR}/src/time_zone_impl.cc - ${LIBRARY_DIR}/src/time_zone_info.cc - ${LIBRARY_DIR}/src/time_zone_libc.cc - ${LIBRARY_DIR}/src/time_zone_lookup.cc - ${LIBRARY_DIR}/src/time_zone_posix.cc - ${LIBRARY_DIR}/src/zone_info_source.cc + "${LIBRARY_DIR}/src/civil_time_detail.cc" + "${LIBRARY_DIR}/src/time_zone_fixed.cc" + "${LIBRARY_DIR}/src/time_zone_format.cc" + "${LIBRARY_DIR}/src/time_zone_if.cc" + "${LIBRARY_DIR}/src/time_zone_impl.cc" + "${LIBRARY_DIR}/src/time_zone_info.cc" + "${LIBRARY_DIR}/src/time_zone_libc.cc" + "${LIBRARY_DIR}/src/time_zone_lookup.cc" + "${LIBRARY_DIR}/src/time_zone_posix.cc" + "${LIBRARY_DIR}/src/zone_info_source.cc" ) add_library (cctz ${SRCS}) - target_include_directories (cctz PUBLIC ${LIBRARY_DIR}/include) + target_include_directories (cctz PUBLIC "${LIBRARY_DIR}/include") if (OS_FREEBSD) # yes, need linux, because bsd check inside linux in time_zone_libc.cc:24 @@ -73,8 +73,8 @@ if (NOT EXTERNAL_CCTZ_LIBRARY_FOUND OR NOT EXTERNAL_CCTZ_LIBRARY_WORKS) # Build a libray with embedded tzdata if (OS_LINUX) # get the list of timezones from tzdata shipped with cctz - set(TZDIR ${LIBRARY_DIR}/testdata/zoneinfo) - file(STRINGS ${LIBRARY_DIR}/testdata/version TZDATA_VERSION) + set(TZDIR "${LIBRARY_DIR}/testdata/zoneinfo") + file(STRINGS "${LIBRARY_DIR}/testdata/version" TZDATA_VERSION) set_property(GLOBAL PROPERTY TZDATA_VERSION_PROP "${TZDATA_VERSION}") message(STATUS "Packaging with tzdata version: ${TZDATA_VERSION}") @@ -97,12 +97,19 @@ if (NOT EXTERNAL_CCTZ_LIBRARY_FOUND OR NOT EXTERNAL_CCTZ_LIBRARY_WORKS) set(TZ_OBJS ${TZ_OBJS} ${TZ_OBJ}) # https://stackoverflow.com/questions/14776463/compile-and-add-an-object-file-from-a-binary-with-cmake - add_custom_command(OUTPUT ${TZ_OBJ} - COMMAND cp ${TZDIR}/${TIMEZONE} ${CMAKE_CURRENT_BINARY_DIR}/${TIMEZONE_ID} - COMMAND cd ${CMAKE_CURRENT_BINARY_DIR} && ${OBJCOPY_PATH} -I binary ${OBJCOPY_ARCH_OPTIONS} + # PPC64LE fails to do this with objcopy, use ld or lld instead + if (ARCH_PPC64LE) + add_custom_command(OUTPUT ${TZ_OBJ} + COMMAND cp "${TZDIR}/${TIMEZONE}" "${CMAKE_CURRENT_BINARY_DIR}/${TIMEZONE_ID}" + COMMAND cd ${CMAKE_CURRENT_BINARY_DIR} && ${CMAKE_LINKER} -m elf64lppc -r -b binary -o ${TZ_OBJ} ${TIMEZONE_ID} + COMMAND rm "${CMAKE_CURRENT_BINARY_DIR}/${TIMEZONE_ID}") + else() + add_custom_command(OUTPUT ${TZ_OBJ} + COMMAND cp "${TZDIR}/${TIMEZONE}" "${CMAKE_CURRENT_BINARY_DIR}/${TIMEZONE_ID}" + COMMAND cd ${CMAKE_CURRENT_BINARY_DIR} && ${OBJCOPY_PATH} -I binary ${OBJCOPY_ARCH_OPTIONS} --rename-section .data=.rodata,alloc,load,readonly,data,contents ${TIMEZONE_ID} ${TZ_OBJ} - COMMAND rm ${CMAKE_CURRENT_BINARY_DIR}/${TIMEZONE_ID}) - + COMMAND rm "${CMAKE_CURRENT_BINARY_DIR}/${TIMEZONE_ID}") + endif() set_source_files_properties(${TZ_OBJ} PROPERTIES EXTERNAL_OBJECT true 
GENERATED true) endforeach(TIMEZONE) diff --git a/contrib/cppkafka b/contrib/cppkafka index b06e64ef5bf..57a599d99c5 160000 --- a/contrib/cppkafka +++ b/contrib/cppkafka @@ -1 +1 @@ -Subproject commit b06e64ef5bffd636d918a742c689f69130c1dbab +Subproject commit 57a599d99c540e647bcd0eb9ea77c523cca011b3 diff --git a/contrib/cppkafka-cmake/CMakeLists.txt b/contrib/cppkafka-cmake/CMakeLists.txt index 9f512974948..0bc33ada529 100644 --- a/contrib/cppkafka-cmake/CMakeLists.txt +++ b/contrib/cppkafka-cmake/CMakeLists.txt @@ -1,25 +1,25 @@ -set(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/cppkafka) +set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/cppkafka") set(SRCS - ${LIBRARY_DIR}/src/buffer.cpp - ${LIBRARY_DIR}/src/configuration_option.cpp - ${LIBRARY_DIR}/src/configuration.cpp - ${LIBRARY_DIR}/src/consumer.cpp - ${LIBRARY_DIR}/src/error.cpp - ${LIBRARY_DIR}/src/event.cpp - ${LIBRARY_DIR}/src/exceptions.cpp - ${LIBRARY_DIR}/src/group_information.cpp - ${LIBRARY_DIR}/src/kafka_handle_base.cpp - ${LIBRARY_DIR}/src/message_internal.cpp - ${LIBRARY_DIR}/src/message_timestamp.cpp - ${LIBRARY_DIR}/src/message.cpp - ${LIBRARY_DIR}/src/metadata.cpp - ${LIBRARY_DIR}/src/producer.cpp - ${LIBRARY_DIR}/src/queue.cpp - ${LIBRARY_DIR}/src/topic_configuration.cpp - ${LIBRARY_DIR}/src/topic_partition_list.cpp - ${LIBRARY_DIR}/src/topic_partition.cpp - ${LIBRARY_DIR}/src/topic.cpp + "${LIBRARY_DIR}/src/buffer.cpp" + "${LIBRARY_DIR}/src/configuration_option.cpp" + "${LIBRARY_DIR}/src/configuration.cpp" + "${LIBRARY_DIR}/src/consumer.cpp" + "${LIBRARY_DIR}/src/error.cpp" + "${LIBRARY_DIR}/src/event.cpp" + "${LIBRARY_DIR}/src/exceptions.cpp" + "${LIBRARY_DIR}/src/group_information.cpp" + "${LIBRARY_DIR}/src/kafka_handle_base.cpp" + "${LIBRARY_DIR}/src/message_internal.cpp" + "${LIBRARY_DIR}/src/message_timestamp.cpp" + "${LIBRARY_DIR}/src/message.cpp" + "${LIBRARY_DIR}/src/metadata.cpp" + "${LIBRARY_DIR}/src/producer.cpp" + "${LIBRARY_DIR}/src/queue.cpp" + "${LIBRARY_DIR}/src/topic_configuration.cpp" + "${LIBRARY_DIR}/src/topic_partition_list.cpp" + "${LIBRARY_DIR}/src/topic_partition.cpp" + "${LIBRARY_DIR}/src/topic.cpp" ) add_library(cppkafka ${SRCS}) @@ -29,5 +29,5 @@ target_link_libraries(cppkafka ${RDKAFKA_LIBRARY} boost::headers_only ) -target_include_directories(cppkafka PRIVATE ${LIBRARY_DIR}/include/cppkafka) -target_include_directories(cppkafka SYSTEM BEFORE PUBLIC ${LIBRARY_DIR}/include) +target_include_directories(cppkafka PRIVATE "${LIBRARY_DIR}/include/cppkafka") +target_include_directories(cppkafka SYSTEM BEFORE PUBLIC "${LIBRARY_DIR}/include") diff --git a/contrib/croaring-cmake/CMakeLists.txt b/contrib/croaring-cmake/CMakeLists.txt index 8a8ca62e051..f4a5d8a01dc 100644 --- a/contrib/croaring-cmake/CMakeLists.txt +++ b/contrib/croaring-cmake/CMakeLists.txt @@ -1,26 +1,26 @@ -set(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/croaring) +set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/croaring") set(SRCS - ${LIBRARY_DIR}/src/array_util.c - ${LIBRARY_DIR}/src/bitset_util.c - ${LIBRARY_DIR}/src/containers/array.c - ${LIBRARY_DIR}/src/containers/bitset.c - ${LIBRARY_DIR}/src/containers/containers.c - ${LIBRARY_DIR}/src/containers/convert.c - ${LIBRARY_DIR}/src/containers/mixed_intersection.c - ${LIBRARY_DIR}/src/containers/mixed_union.c - ${LIBRARY_DIR}/src/containers/mixed_equal.c - ${LIBRARY_DIR}/src/containers/mixed_subset.c - ${LIBRARY_DIR}/src/containers/mixed_negation.c - ${LIBRARY_DIR}/src/containers/mixed_xor.c - ${LIBRARY_DIR}/src/containers/mixed_andnot.c - ${LIBRARY_DIR}/src/containers/run.c 
- ${LIBRARY_DIR}/src/roaring.c - ${LIBRARY_DIR}/src/roaring_priority_queue.c - ${LIBRARY_DIR}/src/roaring_array.c) + "${LIBRARY_DIR}/src/array_util.c" + "${LIBRARY_DIR}/src/bitset_util.c" + "${LIBRARY_DIR}/src/containers/array.c" + "${LIBRARY_DIR}/src/containers/bitset.c" + "${LIBRARY_DIR}/src/containers/containers.c" + "${LIBRARY_DIR}/src/containers/convert.c" + "${LIBRARY_DIR}/src/containers/mixed_intersection.c" + "${LIBRARY_DIR}/src/containers/mixed_union.c" + "${LIBRARY_DIR}/src/containers/mixed_equal.c" + "${LIBRARY_DIR}/src/containers/mixed_subset.c" + "${LIBRARY_DIR}/src/containers/mixed_negation.c" + "${LIBRARY_DIR}/src/containers/mixed_xor.c" + "${LIBRARY_DIR}/src/containers/mixed_andnot.c" + "${LIBRARY_DIR}/src/containers/run.c" + "${LIBRARY_DIR}/src/roaring.c" + "${LIBRARY_DIR}/src/roaring_priority_queue.c" + "${LIBRARY_DIR}/src/roaring_array.c") add_library(roaring ${SRCS}) -target_include_directories(roaring PRIVATE ${LIBRARY_DIR}/include/roaring) -target_include_directories(roaring SYSTEM BEFORE PUBLIC ${LIBRARY_DIR}/include) -target_include_directories(roaring SYSTEM BEFORE PUBLIC ${LIBRARY_DIR}/cpp) +target_include_directories(roaring PRIVATE "${LIBRARY_DIR}/include/roaring") +target_include_directories(roaring SYSTEM BEFORE PUBLIC "${LIBRARY_DIR}/include") +target_include_directories(roaring SYSTEM BEFORE PUBLIC "${LIBRARY_DIR}/cpp") diff --git a/contrib/curl-cmake/CMakeLists.txt b/contrib/curl-cmake/CMakeLists.txt index a24c9fa8765..1f7449af914 100644 --- a/contrib/curl-cmake/CMakeLists.txt +++ b/contrib/curl-cmake/CMakeLists.txt @@ -5,143 +5,143 @@ endif() set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/curl") set (SRCS - ${LIBRARY_DIR}/lib/file.c - ${LIBRARY_DIR}/lib/timeval.c - ${LIBRARY_DIR}/lib/base64.c - ${LIBRARY_DIR}/lib/hostip.c - ${LIBRARY_DIR}/lib/progress.c - ${LIBRARY_DIR}/lib/formdata.c - ${LIBRARY_DIR}/lib/cookie.c - ${LIBRARY_DIR}/lib/http.c - ${LIBRARY_DIR}/lib/sendf.c - ${LIBRARY_DIR}/lib/url.c - ${LIBRARY_DIR}/lib/dict.c - ${LIBRARY_DIR}/lib/if2ip.c - ${LIBRARY_DIR}/lib/speedcheck.c - ${LIBRARY_DIR}/lib/ldap.c - ${LIBRARY_DIR}/lib/version.c - ${LIBRARY_DIR}/lib/getenv.c - ${LIBRARY_DIR}/lib/escape.c - ${LIBRARY_DIR}/lib/mprintf.c - ${LIBRARY_DIR}/lib/telnet.c - ${LIBRARY_DIR}/lib/netrc.c - ${LIBRARY_DIR}/lib/getinfo.c - ${LIBRARY_DIR}/lib/transfer.c - ${LIBRARY_DIR}/lib/strcase.c - ${LIBRARY_DIR}/lib/easy.c - ${LIBRARY_DIR}/lib/security.c - ${LIBRARY_DIR}/lib/curl_fnmatch.c - ${LIBRARY_DIR}/lib/fileinfo.c - ${LIBRARY_DIR}/lib/wildcard.c - ${LIBRARY_DIR}/lib/krb5.c - ${LIBRARY_DIR}/lib/memdebug.c - ${LIBRARY_DIR}/lib/http_chunks.c - ${LIBRARY_DIR}/lib/strtok.c - ${LIBRARY_DIR}/lib/connect.c - ${LIBRARY_DIR}/lib/llist.c - ${LIBRARY_DIR}/lib/hash.c - ${LIBRARY_DIR}/lib/multi.c - ${LIBRARY_DIR}/lib/content_encoding.c - ${LIBRARY_DIR}/lib/share.c - ${LIBRARY_DIR}/lib/http_digest.c - ${LIBRARY_DIR}/lib/md4.c - ${LIBRARY_DIR}/lib/md5.c - ${LIBRARY_DIR}/lib/http_negotiate.c - ${LIBRARY_DIR}/lib/inet_pton.c - ${LIBRARY_DIR}/lib/strtoofft.c - ${LIBRARY_DIR}/lib/strerror.c - ${LIBRARY_DIR}/lib/amigaos.c - ${LIBRARY_DIR}/lib/hostasyn.c - ${LIBRARY_DIR}/lib/hostip4.c - ${LIBRARY_DIR}/lib/hostip6.c - ${LIBRARY_DIR}/lib/hostsyn.c - ${LIBRARY_DIR}/lib/inet_ntop.c - ${LIBRARY_DIR}/lib/parsedate.c - ${LIBRARY_DIR}/lib/select.c - ${LIBRARY_DIR}/lib/splay.c - ${LIBRARY_DIR}/lib/strdup.c - ${LIBRARY_DIR}/lib/socks.c - ${LIBRARY_DIR}/lib/curl_addrinfo.c - ${LIBRARY_DIR}/lib/socks_gssapi.c - ${LIBRARY_DIR}/lib/socks_sspi.c - ${LIBRARY_DIR}/lib/curl_sspi.c - 
${LIBRARY_DIR}/lib/slist.c - ${LIBRARY_DIR}/lib/nonblock.c - ${LIBRARY_DIR}/lib/curl_memrchr.c - ${LIBRARY_DIR}/lib/imap.c - ${LIBRARY_DIR}/lib/pop3.c - ${LIBRARY_DIR}/lib/smtp.c - ${LIBRARY_DIR}/lib/pingpong.c - ${LIBRARY_DIR}/lib/rtsp.c - ${LIBRARY_DIR}/lib/curl_threads.c - ${LIBRARY_DIR}/lib/warnless.c - ${LIBRARY_DIR}/lib/hmac.c - ${LIBRARY_DIR}/lib/curl_rtmp.c - ${LIBRARY_DIR}/lib/openldap.c - ${LIBRARY_DIR}/lib/curl_gethostname.c - ${LIBRARY_DIR}/lib/gopher.c - ${LIBRARY_DIR}/lib/idn_win32.c - ${LIBRARY_DIR}/lib/http_proxy.c - ${LIBRARY_DIR}/lib/non-ascii.c - ${LIBRARY_DIR}/lib/asyn-thread.c - ${LIBRARY_DIR}/lib/curl_gssapi.c - ${LIBRARY_DIR}/lib/http_ntlm.c - ${LIBRARY_DIR}/lib/curl_ntlm_wb.c - ${LIBRARY_DIR}/lib/curl_ntlm_core.c - ${LIBRARY_DIR}/lib/curl_sasl.c - ${LIBRARY_DIR}/lib/rand.c - ${LIBRARY_DIR}/lib/curl_multibyte.c - ${LIBRARY_DIR}/lib/hostcheck.c - ${LIBRARY_DIR}/lib/conncache.c - ${LIBRARY_DIR}/lib/dotdot.c - ${LIBRARY_DIR}/lib/x509asn1.c - ${LIBRARY_DIR}/lib/http2.c - ${LIBRARY_DIR}/lib/smb.c - ${LIBRARY_DIR}/lib/curl_endian.c - ${LIBRARY_DIR}/lib/curl_des.c - ${LIBRARY_DIR}/lib/system_win32.c - ${LIBRARY_DIR}/lib/mime.c - ${LIBRARY_DIR}/lib/sha256.c - ${LIBRARY_DIR}/lib/setopt.c - ${LIBRARY_DIR}/lib/curl_path.c - ${LIBRARY_DIR}/lib/curl_ctype.c - ${LIBRARY_DIR}/lib/curl_range.c - ${LIBRARY_DIR}/lib/psl.c - ${LIBRARY_DIR}/lib/doh.c - ${LIBRARY_DIR}/lib/urlapi.c - ${LIBRARY_DIR}/lib/curl_get_line.c - ${LIBRARY_DIR}/lib/altsvc.c - ${LIBRARY_DIR}/lib/socketpair.c - ${LIBRARY_DIR}/lib/vauth/vauth.c - ${LIBRARY_DIR}/lib/vauth/cleartext.c - ${LIBRARY_DIR}/lib/vauth/cram.c - ${LIBRARY_DIR}/lib/vauth/digest.c - ${LIBRARY_DIR}/lib/vauth/digest_sspi.c - ${LIBRARY_DIR}/lib/vauth/krb5_gssapi.c - ${LIBRARY_DIR}/lib/vauth/krb5_sspi.c - ${LIBRARY_DIR}/lib/vauth/ntlm.c - ${LIBRARY_DIR}/lib/vauth/ntlm_sspi.c - ${LIBRARY_DIR}/lib/vauth/oauth2.c - ${LIBRARY_DIR}/lib/vauth/spnego_gssapi.c - ${LIBRARY_DIR}/lib/vauth/spnego_sspi.c - ${LIBRARY_DIR}/lib/vtls/openssl.c - ${LIBRARY_DIR}/lib/vtls/gtls.c - ${LIBRARY_DIR}/lib/vtls/vtls.c - ${LIBRARY_DIR}/lib/vtls/nss.c - ${LIBRARY_DIR}/lib/vtls/polarssl.c - ${LIBRARY_DIR}/lib/vtls/polarssl_threadlock.c - ${LIBRARY_DIR}/lib/vtls/wolfssl.c - ${LIBRARY_DIR}/lib/vtls/schannel.c - ${LIBRARY_DIR}/lib/vtls/schannel_verify.c - ${LIBRARY_DIR}/lib/vtls/sectransp.c - ${LIBRARY_DIR}/lib/vtls/gskit.c - ${LIBRARY_DIR}/lib/vtls/mbedtls.c - ${LIBRARY_DIR}/lib/vtls/mesalink.c - ${LIBRARY_DIR}/lib/vtls/bearssl.c - ${LIBRARY_DIR}/lib/vquic/ngtcp2.c - ${LIBRARY_DIR}/lib/vquic/quiche.c - ${LIBRARY_DIR}/lib/vssh/libssh2.c - ${LIBRARY_DIR}/lib/vssh/libssh.c + "${LIBRARY_DIR}/lib/file.c" + "${LIBRARY_DIR}/lib/timeval.c" + "${LIBRARY_DIR}/lib/base64.c" + "${LIBRARY_DIR}/lib/hostip.c" + "${LIBRARY_DIR}/lib/progress.c" + "${LIBRARY_DIR}/lib/formdata.c" + "${LIBRARY_DIR}/lib/cookie.c" + "${LIBRARY_DIR}/lib/http.c" + "${LIBRARY_DIR}/lib/sendf.c" + "${LIBRARY_DIR}/lib/url.c" + "${LIBRARY_DIR}/lib/dict.c" + "${LIBRARY_DIR}/lib/if2ip.c" + "${LIBRARY_DIR}/lib/speedcheck.c" + "${LIBRARY_DIR}/lib/ldap.c" + "${LIBRARY_DIR}/lib/version.c" + "${LIBRARY_DIR}/lib/getenv.c" + "${LIBRARY_DIR}/lib/escape.c" + "${LIBRARY_DIR}/lib/mprintf.c" + "${LIBRARY_DIR}/lib/telnet.c" + "${LIBRARY_DIR}/lib/netrc.c" + "${LIBRARY_DIR}/lib/getinfo.c" + "${LIBRARY_DIR}/lib/transfer.c" + "${LIBRARY_DIR}/lib/strcase.c" + "${LIBRARY_DIR}/lib/easy.c" + "${LIBRARY_DIR}/lib/security.c" + "${LIBRARY_DIR}/lib/curl_fnmatch.c" + "${LIBRARY_DIR}/lib/fileinfo.c" + "${LIBRARY_DIR}/lib/wildcard.c" + 
"${LIBRARY_DIR}/lib/krb5.c" + "${LIBRARY_DIR}/lib/memdebug.c" + "${LIBRARY_DIR}/lib/http_chunks.c" + "${LIBRARY_DIR}/lib/strtok.c" + "${LIBRARY_DIR}/lib/connect.c" + "${LIBRARY_DIR}/lib/llist.c" + "${LIBRARY_DIR}/lib/hash.c" + "${LIBRARY_DIR}/lib/multi.c" + "${LIBRARY_DIR}/lib/content_encoding.c" + "${LIBRARY_DIR}/lib/share.c" + "${LIBRARY_DIR}/lib/http_digest.c" + "${LIBRARY_DIR}/lib/md4.c" + "${LIBRARY_DIR}/lib/md5.c" + "${LIBRARY_DIR}/lib/http_negotiate.c" + "${LIBRARY_DIR}/lib/inet_pton.c" + "${LIBRARY_DIR}/lib/strtoofft.c" + "${LIBRARY_DIR}/lib/strerror.c" + "${LIBRARY_DIR}/lib/amigaos.c" + "${LIBRARY_DIR}/lib/hostasyn.c" + "${LIBRARY_DIR}/lib/hostip4.c" + "${LIBRARY_DIR}/lib/hostip6.c" + "${LIBRARY_DIR}/lib/hostsyn.c" + "${LIBRARY_DIR}/lib/inet_ntop.c" + "${LIBRARY_DIR}/lib/parsedate.c" + "${LIBRARY_DIR}/lib/select.c" + "${LIBRARY_DIR}/lib/splay.c" + "${LIBRARY_DIR}/lib/strdup.c" + "${LIBRARY_DIR}/lib/socks.c" + "${LIBRARY_DIR}/lib/curl_addrinfo.c" + "${LIBRARY_DIR}/lib/socks_gssapi.c" + "${LIBRARY_DIR}/lib/socks_sspi.c" + "${LIBRARY_DIR}/lib/curl_sspi.c" + "${LIBRARY_DIR}/lib/slist.c" + "${LIBRARY_DIR}/lib/nonblock.c" + "${LIBRARY_DIR}/lib/curl_memrchr.c" + "${LIBRARY_DIR}/lib/imap.c" + "${LIBRARY_DIR}/lib/pop3.c" + "${LIBRARY_DIR}/lib/smtp.c" + "${LIBRARY_DIR}/lib/pingpong.c" + "${LIBRARY_DIR}/lib/rtsp.c" + "${LIBRARY_DIR}/lib/curl_threads.c" + "${LIBRARY_DIR}/lib/warnless.c" + "${LIBRARY_DIR}/lib/hmac.c" + "${LIBRARY_DIR}/lib/curl_rtmp.c" + "${LIBRARY_DIR}/lib/openldap.c" + "${LIBRARY_DIR}/lib/curl_gethostname.c" + "${LIBRARY_DIR}/lib/gopher.c" + "${LIBRARY_DIR}/lib/idn_win32.c" + "${LIBRARY_DIR}/lib/http_proxy.c" + "${LIBRARY_DIR}/lib/non-ascii.c" + "${LIBRARY_DIR}/lib/asyn-thread.c" + "${LIBRARY_DIR}/lib/curl_gssapi.c" + "${LIBRARY_DIR}/lib/http_ntlm.c" + "${LIBRARY_DIR}/lib/curl_ntlm_wb.c" + "${LIBRARY_DIR}/lib/curl_ntlm_core.c" + "${LIBRARY_DIR}/lib/curl_sasl.c" + "${LIBRARY_DIR}/lib/rand.c" + "${LIBRARY_DIR}/lib/curl_multibyte.c" + "${LIBRARY_DIR}/lib/hostcheck.c" + "${LIBRARY_DIR}/lib/conncache.c" + "${LIBRARY_DIR}/lib/dotdot.c" + "${LIBRARY_DIR}/lib/x509asn1.c" + "${LIBRARY_DIR}/lib/http2.c" + "${LIBRARY_DIR}/lib/smb.c" + "${LIBRARY_DIR}/lib/curl_endian.c" + "${LIBRARY_DIR}/lib/curl_des.c" + "${LIBRARY_DIR}/lib/system_win32.c" + "${LIBRARY_DIR}/lib/mime.c" + "${LIBRARY_DIR}/lib/sha256.c" + "${LIBRARY_DIR}/lib/setopt.c" + "${LIBRARY_DIR}/lib/curl_path.c" + "${LIBRARY_DIR}/lib/curl_ctype.c" + "${LIBRARY_DIR}/lib/curl_range.c" + "${LIBRARY_DIR}/lib/psl.c" + "${LIBRARY_DIR}/lib/doh.c" + "${LIBRARY_DIR}/lib/urlapi.c" + "${LIBRARY_DIR}/lib/curl_get_line.c" + "${LIBRARY_DIR}/lib/altsvc.c" + "${LIBRARY_DIR}/lib/socketpair.c" + "${LIBRARY_DIR}/lib/vauth/vauth.c" + "${LIBRARY_DIR}/lib/vauth/cleartext.c" + "${LIBRARY_DIR}/lib/vauth/cram.c" + "${LIBRARY_DIR}/lib/vauth/digest.c" + "${LIBRARY_DIR}/lib/vauth/digest_sspi.c" + "${LIBRARY_DIR}/lib/vauth/krb5_gssapi.c" + "${LIBRARY_DIR}/lib/vauth/krb5_sspi.c" + "${LIBRARY_DIR}/lib/vauth/ntlm.c" + "${LIBRARY_DIR}/lib/vauth/ntlm_sspi.c" + "${LIBRARY_DIR}/lib/vauth/oauth2.c" + "${LIBRARY_DIR}/lib/vauth/spnego_gssapi.c" + "${LIBRARY_DIR}/lib/vauth/spnego_sspi.c" + "${LIBRARY_DIR}/lib/vtls/openssl.c" + "${LIBRARY_DIR}/lib/vtls/gtls.c" + "${LIBRARY_DIR}/lib/vtls/vtls.c" + "${LIBRARY_DIR}/lib/vtls/nss.c" + "${LIBRARY_DIR}/lib/vtls/polarssl.c" + "${LIBRARY_DIR}/lib/vtls/polarssl_threadlock.c" + "${LIBRARY_DIR}/lib/vtls/wolfssl.c" + "${LIBRARY_DIR}/lib/vtls/schannel.c" + "${LIBRARY_DIR}/lib/vtls/schannel_verify.c" + 
"${LIBRARY_DIR}/lib/vtls/sectransp.c" + "${LIBRARY_DIR}/lib/vtls/gskit.c" + "${LIBRARY_DIR}/lib/vtls/mbedtls.c" + "${LIBRARY_DIR}/lib/vtls/mesalink.c" + "${LIBRARY_DIR}/lib/vtls/bearssl.c" + "${LIBRARY_DIR}/lib/vquic/ngtcp2.c" + "${LIBRARY_DIR}/lib/vquic/quiche.c" + "${LIBRARY_DIR}/lib/vssh/libssh2.c" + "${LIBRARY_DIR}/lib/vssh/libssh.c" ) add_library (curl ${SRCS}) @@ -154,8 +154,8 @@ target_compile_definitions (curl PRIVATE OS="${CMAKE_SYSTEM_NAME}" ) target_include_directories (curl PUBLIC - ${LIBRARY_DIR}/include - ${LIBRARY_DIR}/lib + "${LIBRARY_DIR}/include" + "${LIBRARY_DIR}/lib" . # curl_config.h ) @@ -171,8 +171,8 @@ target_compile_options (curl PRIVATE -g0) # - sentry-native set (CURL_FOUND ON CACHE BOOL "") set (CURL_ROOT_DIR ${LIBRARY_DIR} CACHE PATH "") -set (CURL_INCLUDE_DIR ${LIBRARY_DIR}/include CACHE PATH "") -set (CURL_INCLUDE_DIRS ${LIBRARY_DIR}/include CACHE PATH "") +set (CURL_INCLUDE_DIR "${LIBRARY_DIR}/include" CACHE PATH "") +set (CURL_INCLUDE_DIRS "${LIBRARY_DIR}/include" CACHE PATH "") set (CURL_LIBRARY curl CACHE STRING "") set (CURL_LIBRARIES ${CURL_LIBRARY} CACHE STRING "") set (CURL_VERSION_STRING 7.67.0 CACHE STRING "") diff --git a/contrib/cyrus-sasl b/contrib/cyrus-sasl index 9995bf9d8e1..e6466edfd63 160000 --- a/contrib/cyrus-sasl +++ b/contrib/cyrus-sasl @@ -1 +1 @@ -Subproject commit 9995bf9d8e14f58934d9313ac64f13780d6dd3c9 +Subproject commit e6466edfd638cc5073debe941c53345b18a09512 diff --git a/contrib/cyrus-sasl-cmake/CMakeLists.txt b/contrib/cyrus-sasl-cmake/CMakeLists.txt index 5003c9a21db..aa25a078718 100644 --- a/contrib/cyrus-sasl-cmake/CMakeLists.txt +++ b/contrib/cyrus-sasl-cmake/CMakeLists.txt @@ -1,23 +1,23 @@ -set(CYRUS_SASL_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/cyrus-sasl) +set(CYRUS_SASL_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/cyrus-sasl") add_library(${CYRUS_SASL_LIBRARY}) target_sources(${CYRUS_SASL_LIBRARY} PRIVATE - ${CYRUS_SASL_SOURCE_DIR}/plugins/gssapi.c - # ${CYRUS_SASL_SOURCE_DIR}/plugins/gssapiv2_init.c - ${CYRUS_SASL_SOURCE_DIR}/common/plugin_common.c - ${CYRUS_SASL_SOURCE_DIR}/lib/common.c - ${CYRUS_SASL_SOURCE_DIR}/lib/canonusr.c - ${CYRUS_SASL_SOURCE_DIR}/lib/server.c - ${CYRUS_SASL_SOURCE_DIR}/lib/config.c - ${CYRUS_SASL_SOURCE_DIR}/lib/auxprop.c - ${CYRUS_SASL_SOURCE_DIR}/lib/saslutil.c - ${CYRUS_SASL_SOURCE_DIR}/lib/external.c - ${CYRUS_SASL_SOURCE_DIR}/lib/seterror.c - ${CYRUS_SASL_SOURCE_DIR}/lib/md5.c - ${CYRUS_SASL_SOURCE_DIR}/lib/dlopen.c - ${CYRUS_SASL_SOURCE_DIR}/lib/client.c - ${CYRUS_SASL_SOURCE_DIR}/lib/checkpw.c + "${CYRUS_SASL_SOURCE_DIR}/plugins/gssapi.c" + # "${CYRUS_SASL_SOURCE_DIR}/plugins/gssapiv2_init.c" + "${CYRUS_SASL_SOURCE_DIR}/common/plugin_common.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/common.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/canonusr.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/server.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/config.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/auxprop.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/saslutil.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/external.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/seterror.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/md5.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/dlopen.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/client.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/checkpw.c" ) target_include_directories(${CYRUS_SASL_LIBRARY} PUBLIC @@ -26,16 +26,16 @@ target_include_directories(${CYRUS_SASL_LIBRARY} PUBLIC target_include_directories(${CYRUS_SASL_LIBRARY} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} # for config.h - ${CYRUS_SASL_SOURCE_DIR}/plugins + "${CYRUS_SASL_SOURCE_DIR}/plugins" ${CYRUS_SASL_SOURCE_DIR} - 
${CYRUS_SASL_SOURCE_DIR}/include - ${CYRUS_SASL_SOURCE_DIR}/lib - ${CYRUS_SASL_SOURCE_DIR}/sasldb - ${CYRUS_SASL_SOURCE_DIR}/common - ${CYRUS_SASL_SOURCE_DIR}/saslauthd - ${CYRUS_SASL_SOURCE_DIR}/sample - ${CYRUS_SASL_SOURCE_DIR}/utils - ${CYRUS_SASL_SOURCE_DIR}/tests + "${CYRUS_SASL_SOURCE_DIR}/include" + "${CYRUS_SASL_SOURCE_DIR}/lib" + "${CYRUS_SASL_SOURCE_DIR}/sasldb" + "${CYRUS_SASL_SOURCE_DIR}/common" + "${CYRUS_SASL_SOURCE_DIR}/saslauthd" + "${CYRUS_SASL_SOURCE_DIR}/sample" + "${CYRUS_SASL_SOURCE_DIR}/utils" + "${CYRUS_SASL_SOURCE_DIR}/tests" ) target_compile_definitions(${CYRUS_SASL_LIBRARY} PUBLIC @@ -52,15 +52,15 @@ target_compile_definitions(${CYRUS_SASL_LIBRARY} PUBLIC LIBSASL_EXPORTS=1 ) -file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/sasl) +file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/sasl") file(COPY - ${CYRUS_SASL_SOURCE_DIR}/include/sasl.h - DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/sasl + "${CYRUS_SASL_SOURCE_DIR}/include/sasl.h" + DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/sasl" ) file(COPY - ${CYRUS_SASL_SOURCE_DIR}/include/prop.h + "${CYRUS_SASL_SOURCE_DIR}/include/prop.h" DESTINATION ${CMAKE_CURRENT_BINARY_DIR} ) diff --git a/contrib/datasketches-cpp b/contrib/datasketches-cpp new file mode 160000 index 00000000000..7d73d7610db --- /dev/null +++ b/contrib/datasketches-cpp @@ -0,0 +1 @@ +Subproject commit 7d73d7610db31d4e1ecde0fb3a7ee90ef371207f diff --git a/contrib/double-conversion-cmake/CMakeLists.txt b/contrib/double-conversion-cmake/CMakeLists.txt index 0690731e1b1..c8bf1b34b8f 100644 --- a/contrib/double-conversion-cmake/CMakeLists.txt +++ b/contrib/double-conversion-cmake/CMakeLists.txt @@ -1,13 +1,13 @@ -SET(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/double-conversion) +SET(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/double-conversion") add_library(double-conversion -${LIBRARY_DIR}/double-conversion/bignum.cc -${LIBRARY_DIR}/double-conversion/bignum-dtoa.cc -${LIBRARY_DIR}/double-conversion/cached-powers.cc -${LIBRARY_DIR}/double-conversion/diy-fp.cc -${LIBRARY_DIR}/double-conversion/double-conversion.cc -${LIBRARY_DIR}/double-conversion/fast-dtoa.cc -${LIBRARY_DIR}/double-conversion/fixed-dtoa.cc -${LIBRARY_DIR}/double-conversion/strtod.cc) +"${LIBRARY_DIR}/double-conversion/bignum.cc" +"${LIBRARY_DIR}/double-conversion/bignum-dtoa.cc" +"${LIBRARY_DIR}/double-conversion/cached-powers.cc" +"${LIBRARY_DIR}/double-conversion/diy-fp.cc" +"${LIBRARY_DIR}/double-conversion/double-conversion.cc" +"${LIBRARY_DIR}/double-conversion/fast-dtoa.cc" +"${LIBRARY_DIR}/double-conversion/fixed-dtoa.cc" +"${LIBRARY_DIR}/double-conversion/strtod.cc") target_include_directories(double-conversion SYSTEM BEFORE PUBLIC "${LIBRARY_DIR}") diff --git a/contrib/fastops-cmake/CMakeLists.txt b/contrib/fastops-cmake/CMakeLists.txt index 0269d5603c2..fe7293c614b 100644 --- a/contrib/fastops-cmake/CMakeLists.txt +++ b/contrib/fastops-cmake/CMakeLists.txt @@ -1,18 +1,18 @@ -set(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/fastops) +set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/fastops") set(SRCS "") if(HAVE_AVX) - set (SRCS ${SRCS} ${LIBRARY_DIR}/fastops/avx/ops_avx.cpp) - set_source_files_properties(${LIBRARY_DIR}/fastops/avx/ops_avx.cpp PROPERTIES COMPILE_FLAGS "-mavx -DNO_AVX2") + set (SRCS ${SRCS} "${LIBRARY_DIR}/fastops/avx/ops_avx.cpp") + set_source_files_properties("${LIBRARY_DIR}/fastops/avx/ops_avx.cpp" PROPERTIES COMPILE_FLAGS "-mavx -DNO_AVX2") endif() if(HAVE_AVX2) - set (SRCS ${SRCS} ${LIBRARY_DIR}/fastops/avx2/ops_avx2.cpp) - 
set_source_files_properties(${LIBRARY_DIR}/fastops/avx2/ops_avx2.cpp PROPERTIES COMPILE_FLAGS "-mavx2 -mfma") + set (SRCS ${SRCS} "${LIBRARY_DIR}/fastops/avx2/ops_avx2.cpp") + set_source_files_properties("${LIBRARY_DIR}/fastops/avx2/ops_avx2.cpp" PROPERTIES COMPILE_FLAGS "-mavx2 -mfma") endif() -set (SRCS ${SRCS} ${LIBRARY_DIR}/fastops/plain/ops_plain.cpp ${LIBRARY_DIR}/fastops/core/avx_id.cpp ${LIBRARY_DIR}/fastops/fastops.cpp) +set (SRCS ${SRCS} "${LIBRARY_DIR}/fastops/plain/ops_plain.cpp" "${LIBRARY_DIR}/fastops/core/avx_id.cpp" "${LIBRARY_DIR}/fastops/fastops.cpp") add_library(fastops ${SRCS}) diff --git a/contrib/flatbuffers b/contrib/flatbuffers index 6df40a24717..22e3ffc66d2 160000 --- a/contrib/flatbuffers +++ b/contrib/flatbuffers @@ -1 +1 @@ -Subproject commit 6df40a2471737b27271bdd9b900ab5f3aec746c7 +Subproject commit 22e3ffc66d2d7d72d1414390aa0f04ffd114a5a1 diff --git a/contrib/grpc b/contrib/grpc index 7436366ceb3..60c986e15ca 160000 --- a/contrib/grpc +++ b/contrib/grpc @@ -1 +1 @@ -Subproject commit 7436366ceb341ba5c00ea29f1645e02a2b70bf93 +Subproject commit 60c986e15cae70aade721d26badabab1f822fdd6 diff --git a/contrib/h3-cmake/CMakeLists.txt b/contrib/h3-cmake/CMakeLists.txt index 2911d7283f0..6b184a175b0 100644 --- a/contrib/h3-cmake/CMakeLists.txt +++ b/contrib/h3-cmake/CMakeLists.txt @@ -1,30 +1,30 @@ -set(H3_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/h3/src/h3lib) -set(H3_BINARY_DIR ${ClickHouse_BINARY_DIR}/contrib/h3/src/h3lib) +set(H3_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/h3/src/h3lib") +set(H3_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/h3/src/h3lib") set(SRCS -${H3_SOURCE_DIR}/lib/algos.c -${H3_SOURCE_DIR}/lib/baseCells.c -${H3_SOURCE_DIR}/lib/bbox.c -${H3_SOURCE_DIR}/lib/coordijk.c -${H3_SOURCE_DIR}/lib/faceijk.c -${H3_SOURCE_DIR}/lib/geoCoord.c -${H3_SOURCE_DIR}/lib/h3Index.c -${H3_SOURCE_DIR}/lib/h3UniEdge.c -${H3_SOURCE_DIR}/lib/linkedGeo.c -${H3_SOURCE_DIR}/lib/localij.c -${H3_SOURCE_DIR}/lib/mathExtensions.c -${H3_SOURCE_DIR}/lib/polygon.c -${H3_SOURCE_DIR}/lib/vec2d.c -${H3_SOURCE_DIR}/lib/vec3d.c -${H3_SOURCE_DIR}/lib/vertex.c -${H3_SOURCE_DIR}/lib/vertexGraph.c +"${H3_SOURCE_DIR}/lib/algos.c" +"${H3_SOURCE_DIR}/lib/baseCells.c" +"${H3_SOURCE_DIR}/lib/bbox.c" +"${H3_SOURCE_DIR}/lib/coordijk.c" +"${H3_SOURCE_DIR}/lib/faceijk.c" +"${H3_SOURCE_DIR}/lib/geoCoord.c" +"${H3_SOURCE_DIR}/lib/h3Index.c" +"${H3_SOURCE_DIR}/lib/h3UniEdge.c" +"${H3_SOURCE_DIR}/lib/linkedGeo.c" +"${H3_SOURCE_DIR}/lib/localij.c" +"${H3_SOURCE_DIR}/lib/mathExtensions.c" +"${H3_SOURCE_DIR}/lib/polygon.c" +"${H3_SOURCE_DIR}/lib/vec2d.c" +"${H3_SOURCE_DIR}/lib/vec3d.c" +"${H3_SOURCE_DIR}/lib/vertex.c" +"${H3_SOURCE_DIR}/lib/vertexGraph.c" ) -configure_file(${H3_SOURCE_DIR}/include/h3api.h.in ${H3_BINARY_DIR}/include/h3api.h) +configure_file("${H3_SOURCE_DIR}/include/h3api.h.in" "${H3_BINARY_DIR}/include/h3api.h") add_library(h3 ${SRCS}) -target_include_directories(h3 SYSTEM PUBLIC ${H3_SOURCE_DIR}/include) -target_include_directories(h3 SYSTEM PUBLIC ${H3_BINARY_DIR}/include) +target_include_directories(h3 SYSTEM PUBLIC "${H3_SOURCE_DIR}/include") +target_include_directories(h3 SYSTEM PUBLIC "${H3_BINARY_DIR}/include") target_compile_definitions(h3 PRIVATE H3_HAVE_VLA) if(M_LIBRARY) target_link_libraries(h3 PRIVATE ${M_LIBRARY}) diff --git a/contrib/hyperscan-cmake/CMakeLists.txt b/contrib/hyperscan-cmake/CMakeLists.txt index 75c45ff7bf5..6a364da126d 100644 --- a/contrib/hyperscan-cmake/CMakeLists.txt +++ b/contrib/hyperscan-cmake/CMakeLists.txt @@ -40,211 +40,211 @@ endif () if (NOT 
EXTERNAL_HYPERSCAN_LIBRARY_FOUND) set (USE_INTERNAL_HYPERSCAN_LIBRARY 1) - set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/hyperscan) + set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/hyperscan") set (SRCS - ${LIBRARY_DIR}/src/alloc.c - ${LIBRARY_DIR}/src/compiler/asserts.cpp - ${LIBRARY_DIR}/src/compiler/compiler.cpp - ${LIBRARY_DIR}/src/compiler/error.cpp - ${LIBRARY_DIR}/src/crc32.c - ${LIBRARY_DIR}/src/database.c - ${LIBRARY_DIR}/src/fdr/engine_description.cpp - ${LIBRARY_DIR}/src/fdr/fdr_compile_util.cpp - ${LIBRARY_DIR}/src/fdr/fdr_compile.cpp - ${LIBRARY_DIR}/src/fdr/fdr_confirm_compile.cpp - ${LIBRARY_DIR}/src/fdr/fdr_engine_description.cpp - ${LIBRARY_DIR}/src/fdr/fdr.c - ${LIBRARY_DIR}/src/fdr/flood_compile.cpp - ${LIBRARY_DIR}/src/fdr/teddy_compile.cpp - ${LIBRARY_DIR}/src/fdr/teddy_engine_description.cpp - ${LIBRARY_DIR}/src/fdr/teddy.c - ${LIBRARY_DIR}/src/grey.cpp - ${LIBRARY_DIR}/src/hs_valid_platform.c - ${LIBRARY_DIR}/src/hs_version.c - ${LIBRARY_DIR}/src/hs.cpp - ${LIBRARY_DIR}/src/hwlm/hwlm_build.cpp - ${LIBRARY_DIR}/src/hwlm/hwlm_literal.cpp - ${LIBRARY_DIR}/src/hwlm/hwlm.c - ${LIBRARY_DIR}/src/hwlm/noodle_build.cpp - ${LIBRARY_DIR}/src/hwlm/noodle_engine.c - ${LIBRARY_DIR}/src/nfa/accel_dfa_build_strat.cpp - ${LIBRARY_DIR}/src/nfa/accel.c - ${LIBRARY_DIR}/src/nfa/accelcompile.cpp - ${LIBRARY_DIR}/src/nfa/castle.c - ${LIBRARY_DIR}/src/nfa/castlecompile.cpp - ${LIBRARY_DIR}/src/nfa/dfa_build_strat.cpp - ${LIBRARY_DIR}/src/nfa/dfa_min.cpp - ${LIBRARY_DIR}/src/nfa/gough.c - ${LIBRARY_DIR}/src/nfa/goughcompile_accel.cpp - ${LIBRARY_DIR}/src/nfa/goughcompile_reg.cpp - ${LIBRARY_DIR}/src/nfa/goughcompile.cpp - ${LIBRARY_DIR}/src/nfa/lbr.c - ${LIBRARY_DIR}/src/nfa/limex_64.c - ${LIBRARY_DIR}/src/nfa/limex_accel.c - ${LIBRARY_DIR}/src/nfa/limex_compile.cpp - ${LIBRARY_DIR}/src/nfa/limex_native.c - ${LIBRARY_DIR}/src/nfa/limex_simd128.c - ${LIBRARY_DIR}/src/nfa/limex_simd256.c - ${LIBRARY_DIR}/src/nfa/limex_simd384.c - ${LIBRARY_DIR}/src/nfa/limex_simd512.c - ${LIBRARY_DIR}/src/nfa/mcclellan.c - ${LIBRARY_DIR}/src/nfa/mcclellancompile_util.cpp - ${LIBRARY_DIR}/src/nfa/mcclellancompile.cpp - ${LIBRARY_DIR}/src/nfa/mcsheng_compile.cpp - ${LIBRARY_DIR}/src/nfa/mcsheng_data.c - ${LIBRARY_DIR}/src/nfa/mcsheng.c - ${LIBRARY_DIR}/src/nfa/mpv.c - ${LIBRARY_DIR}/src/nfa/mpvcompile.cpp - ${LIBRARY_DIR}/src/nfa/nfa_api_dispatch.c - ${LIBRARY_DIR}/src/nfa/nfa_build_util.cpp - ${LIBRARY_DIR}/src/nfa/rdfa_graph.cpp - ${LIBRARY_DIR}/src/nfa/rdfa_merge.cpp - ${LIBRARY_DIR}/src/nfa/rdfa.cpp - ${LIBRARY_DIR}/src/nfa/repeat.c - ${LIBRARY_DIR}/src/nfa/repeatcompile.cpp - ${LIBRARY_DIR}/src/nfa/sheng.c - ${LIBRARY_DIR}/src/nfa/shengcompile.cpp - ${LIBRARY_DIR}/src/nfa/shufti.c - ${LIBRARY_DIR}/src/nfa/shufticompile.cpp - ${LIBRARY_DIR}/src/nfa/tamarama.c - ${LIBRARY_DIR}/src/nfa/tamaramacompile.cpp - ${LIBRARY_DIR}/src/nfa/truffle.c - ${LIBRARY_DIR}/src/nfa/trufflecompile.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_anchored_acyclic.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_anchored_dots.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_asserts.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_builder.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_calc_components.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_cyclic_redundancy.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_depth.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_dominators.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_edge_redundancy.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_equivalence.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_execute.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_expr_info.cpp - 
${LIBRARY_DIR}/src/nfagraph/ng_extparam.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_fixed_width.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_fuzzy.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_haig.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_holder.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_is_equal.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_lbr.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_limex_accel.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_limex.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_literal_analysis.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_literal_component.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_literal_decorated.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_mcclellan.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_misc_opt.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_netflow.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_prefilter.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_prune.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_puff.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_redundancy.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_region_redundancy.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_region.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_repeat.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_reports.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_restructuring.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_revacc.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_sep.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_small_literal_set.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_som_add_redundancy.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_som_util.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_som.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_split.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_squash.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_stop.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_uncalc_components.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_utf8.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_util.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_vacuous.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_violet.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_width.cpp - ${LIBRARY_DIR}/src/nfagraph/ng.cpp - ${LIBRARY_DIR}/src/parser/AsciiComponentClass.cpp - ${LIBRARY_DIR}/src/parser/buildstate.cpp - ${LIBRARY_DIR}/src/parser/check_refs.cpp - ${LIBRARY_DIR}/src/parser/Component.cpp - ${LIBRARY_DIR}/src/parser/ComponentAlternation.cpp - ${LIBRARY_DIR}/src/parser/ComponentAssertion.cpp - ${LIBRARY_DIR}/src/parser/ComponentAtomicGroup.cpp - ${LIBRARY_DIR}/src/parser/ComponentBackReference.cpp - ${LIBRARY_DIR}/src/parser/ComponentBoundary.cpp - ${LIBRARY_DIR}/src/parser/ComponentByte.cpp - ${LIBRARY_DIR}/src/parser/ComponentClass.cpp - ${LIBRARY_DIR}/src/parser/ComponentCondReference.cpp - ${LIBRARY_DIR}/src/parser/ComponentEmpty.cpp - ${LIBRARY_DIR}/src/parser/ComponentEUS.cpp - ${LIBRARY_DIR}/src/parser/ComponentRepeat.cpp - ${LIBRARY_DIR}/src/parser/ComponentSequence.cpp - ${LIBRARY_DIR}/src/parser/ComponentVisitor.cpp - ${LIBRARY_DIR}/src/parser/ComponentWordBoundary.cpp - ${LIBRARY_DIR}/src/parser/ConstComponentVisitor.cpp - ${LIBRARY_DIR}/src/parser/control_verbs.cpp - ${LIBRARY_DIR}/src/parser/logical_combination.cpp - ${LIBRARY_DIR}/src/parser/parse_error.cpp - ${LIBRARY_DIR}/src/parser/parser_util.cpp - ${LIBRARY_DIR}/src/parser/Parser.cpp - ${LIBRARY_DIR}/src/parser/prefilter.cpp - ${LIBRARY_DIR}/src/parser/shortcut_literal.cpp - ${LIBRARY_DIR}/src/parser/ucp_table.cpp - ${LIBRARY_DIR}/src/parser/unsupported.cpp - ${LIBRARY_DIR}/src/parser/utf8_validate.cpp - ${LIBRARY_DIR}/src/parser/Utf8ComponentClass.cpp - ${LIBRARY_DIR}/src/rose/block.c - ${LIBRARY_DIR}/src/rose/catchup.c - ${LIBRARY_DIR}/src/rose/init.c - ${LIBRARY_DIR}/src/rose/match.c - ${LIBRARY_DIR}/src/rose/program_runtime.c - ${LIBRARY_DIR}/src/rose/rose_build_add_mask.cpp - ${LIBRARY_DIR}/src/rose/rose_build_add.cpp 
- ${LIBRARY_DIR}/src/rose/rose_build_anchored.cpp - ${LIBRARY_DIR}/src/rose/rose_build_bytecode.cpp - ${LIBRARY_DIR}/src/rose/rose_build_castle.cpp - ${LIBRARY_DIR}/src/rose/rose_build_compile.cpp - ${LIBRARY_DIR}/src/rose/rose_build_convert.cpp - ${LIBRARY_DIR}/src/rose/rose_build_dedupe.cpp - ${LIBRARY_DIR}/src/rose/rose_build_engine_blob.cpp - ${LIBRARY_DIR}/src/rose/rose_build_exclusive.cpp - ${LIBRARY_DIR}/src/rose/rose_build_groups.cpp - ${LIBRARY_DIR}/src/rose/rose_build_infix.cpp - ${LIBRARY_DIR}/src/rose/rose_build_instructions.cpp - ${LIBRARY_DIR}/src/rose/rose_build_lit_accel.cpp - ${LIBRARY_DIR}/src/rose/rose_build_long_lit.cpp - ${LIBRARY_DIR}/src/rose/rose_build_lookaround.cpp - ${LIBRARY_DIR}/src/rose/rose_build_matchers.cpp - ${LIBRARY_DIR}/src/rose/rose_build_merge.cpp - ${LIBRARY_DIR}/src/rose/rose_build_misc.cpp - ${LIBRARY_DIR}/src/rose/rose_build_program.cpp - ${LIBRARY_DIR}/src/rose/rose_build_role_aliasing.cpp - ${LIBRARY_DIR}/src/rose/rose_build_scatter.cpp - ${LIBRARY_DIR}/src/rose/rose_build_width.cpp - ${LIBRARY_DIR}/src/rose/rose_in_util.cpp - ${LIBRARY_DIR}/src/rose/stream.c - ${LIBRARY_DIR}/src/runtime.c - ${LIBRARY_DIR}/src/scratch.c - ${LIBRARY_DIR}/src/smallwrite/smallwrite_build.cpp - ${LIBRARY_DIR}/src/som/slot_manager.cpp - ${LIBRARY_DIR}/src/som/som_runtime.c - ${LIBRARY_DIR}/src/som/som_stream.c - ${LIBRARY_DIR}/src/stream_compress.c - ${LIBRARY_DIR}/src/util/alloc.cpp - ${LIBRARY_DIR}/src/util/charreach.cpp - ${LIBRARY_DIR}/src/util/clique.cpp - ${LIBRARY_DIR}/src/util/compile_context.cpp - ${LIBRARY_DIR}/src/util/compile_error.cpp - ${LIBRARY_DIR}/src/util/cpuid_flags.c - ${LIBRARY_DIR}/src/util/depth.cpp - ${LIBRARY_DIR}/src/util/fatbit_build.cpp - ${LIBRARY_DIR}/src/util/multibit_build.cpp - ${LIBRARY_DIR}/src/util/multibit.c - ${LIBRARY_DIR}/src/util/report_manager.cpp - ${LIBRARY_DIR}/src/util/simd_utils.c - ${LIBRARY_DIR}/src/util/state_compress.c - ${LIBRARY_DIR}/src/util/target_info.cpp - ${LIBRARY_DIR}/src/util/ue2string.cpp + "${LIBRARY_DIR}/src/alloc.c" + "${LIBRARY_DIR}/src/compiler/asserts.cpp" + "${LIBRARY_DIR}/src/compiler/compiler.cpp" + "${LIBRARY_DIR}/src/compiler/error.cpp" + "${LIBRARY_DIR}/src/crc32.c" + "${LIBRARY_DIR}/src/database.c" + "${LIBRARY_DIR}/src/fdr/engine_description.cpp" + "${LIBRARY_DIR}/src/fdr/fdr_compile_util.cpp" + "${LIBRARY_DIR}/src/fdr/fdr_compile.cpp" + "${LIBRARY_DIR}/src/fdr/fdr_confirm_compile.cpp" + "${LIBRARY_DIR}/src/fdr/fdr_engine_description.cpp" + "${LIBRARY_DIR}/src/fdr/fdr.c" + "${LIBRARY_DIR}/src/fdr/flood_compile.cpp" + "${LIBRARY_DIR}/src/fdr/teddy_compile.cpp" + "${LIBRARY_DIR}/src/fdr/teddy_engine_description.cpp" + "${LIBRARY_DIR}/src/fdr/teddy.c" + "${LIBRARY_DIR}/src/grey.cpp" + "${LIBRARY_DIR}/src/hs_valid_platform.c" + "${LIBRARY_DIR}/src/hs_version.c" + "${LIBRARY_DIR}/src/hs.cpp" + "${LIBRARY_DIR}/src/hwlm/hwlm_build.cpp" + "${LIBRARY_DIR}/src/hwlm/hwlm_literal.cpp" + "${LIBRARY_DIR}/src/hwlm/hwlm.c" + "${LIBRARY_DIR}/src/hwlm/noodle_build.cpp" + "${LIBRARY_DIR}/src/hwlm/noodle_engine.c" + "${LIBRARY_DIR}/src/nfa/accel_dfa_build_strat.cpp" + "${LIBRARY_DIR}/src/nfa/accel.c" + "${LIBRARY_DIR}/src/nfa/accelcompile.cpp" + "${LIBRARY_DIR}/src/nfa/castle.c" + "${LIBRARY_DIR}/src/nfa/castlecompile.cpp" + "${LIBRARY_DIR}/src/nfa/dfa_build_strat.cpp" + "${LIBRARY_DIR}/src/nfa/dfa_min.cpp" + "${LIBRARY_DIR}/src/nfa/gough.c" + "${LIBRARY_DIR}/src/nfa/goughcompile_accel.cpp" + "${LIBRARY_DIR}/src/nfa/goughcompile_reg.cpp" + "${LIBRARY_DIR}/src/nfa/goughcompile.cpp" + 
"${LIBRARY_DIR}/src/nfa/lbr.c" + "${LIBRARY_DIR}/src/nfa/limex_64.c" + "${LIBRARY_DIR}/src/nfa/limex_accel.c" + "${LIBRARY_DIR}/src/nfa/limex_compile.cpp" + "${LIBRARY_DIR}/src/nfa/limex_native.c" + "${LIBRARY_DIR}/src/nfa/limex_simd128.c" + "${LIBRARY_DIR}/src/nfa/limex_simd256.c" + "${LIBRARY_DIR}/src/nfa/limex_simd384.c" + "${LIBRARY_DIR}/src/nfa/limex_simd512.c" + "${LIBRARY_DIR}/src/nfa/mcclellan.c" + "${LIBRARY_DIR}/src/nfa/mcclellancompile_util.cpp" + "${LIBRARY_DIR}/src/nfa/mcclellancompile.cpp" + "${LIBRARY_DIR}/src/nfa/mcsheng_compile.cpp" + "${LIBRARY_DIR}/src/nfa/mcsheng_data.c" + "${LIBRARY_DIR}/src/nfa/mcsheng.c" + "${LIBRARY_DIR}/src/nfa/mpv.c" + "${LIBRARY_DIR}/src/nfa/mpvcompile.cpp" + "${LIBRARY_DIR}/src/nfa/nfa_api_dispatch.c" + "${LIBRARY_DIR}/src/nfa/nfa_build_util.cpp" + "${LIBRARY_DIR}/src/nfa/rdfa_graph.cpp" + "${LIBRARY_DIR}/src/nfa/rdfa_merge.cpp" + "${LIBRARY_DIR}/src/nfa/rdfa.cpp" + "${LIBRARY_DIR}/src/nfa/repeat.c" + "${LIBRARY_DIR}/src/nfa/repeatcompile.cpp" + "${LIBRARY_DIR}/src/nfa/sheng.c" + "${LIBRARY_DIR}/src/nfa/shengcompile.cpp" + "${LIBRARY_DIR}/src/nfa/shufti.c" + "${LIBRARY_DIR}/src/nfa/shufticompile.cpp" + "${LIBRARY_DIR}/src/nfa/tamarama.c" + "${LIBRARY_DIR}/src/nfa/tamaramacompile.cpp" + "${LIBRARY_DIR}/src/nfa/truffle.c" + "${LIBRARY_DIR}/src/nfa/trufflecompile.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_anchored_acyclic.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_anchored_dots.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_asserts.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_builder.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_calc_components.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_cyclic_redundancy.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_depth.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_dominators.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_edge_redundancy.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_equivalence.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_execute.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_expr_info.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_extparam.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_fixed_width.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_fuzzy.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_haig.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_holder.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_is_equal.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_lbr.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_limex_accel.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_limex.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_literal_analysis.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_literal_component.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_literal_decorated.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_mcclellan.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_misc_opt.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_netflow.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_prefilter.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_prune.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_puff.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_redundancy.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_region_redundancy.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_region.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_repeat.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_reports.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_restructuring.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_revacc.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_sep.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_small_literal_set.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_som_add_redundancy.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_som_util.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_som.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_split.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_squash.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_stop.cpp" + 
"${LIBRARY_DIR}/src/nfagraph/ng_uncalc_components.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_utf8.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_util.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_vacuous.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_violet.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_width.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng.cpp" + "${LIBRARY_DIR}/src/parser/AsciiComponentClass.cpp" + "${LIBRARY_DIR}/src/parser/buildstate.cpp" + "${LIBRARY_DIR}/src/parser/check_refs.cpp" + "${LIBRARY_DIR}/src/parser/Component.cpp" + "${LIBRARY_DIR}/src/parser/ComponentAlternation.cpp" + "${LIBRARY_DIR}/src/parser/ComponentAssertion.cpp" + "${LIBRARY_DIR}/src/parser/ComponentAtomicGroup.cpp" + "${LIBRARY_DIR}/src/parser/ComponentBackReference.cpp" + "${LIBRARY_DIR}/src/parser/ComponentBoundary.cpp" + "${LIBRARY_DIR}/src/parser/ComponentByte.cpp" + "${LIBRARY_DIR}/src/parser/ComponentClass.cpp" + "${LIBRARY_DIR}/src/parser/ComponentCondReference.cpp" + "${LIBRARY_DIR}/src/parser/ComponentEmpty.cpp" + "${LIBRARY_DIR}/src/parser/ComponentEUS.cpp" + "${LIBRARY_DIR}/src/parser/ComponentRepeat.cpp" + "${LIBRARY_DIR}/src/parser/ComponentSequence.cpp" + "${LIBRARY_DIR}/src/parser/ComponentVisitor.cpp" + "${LIBRARY_DIR}/src/parser/ComponentWordBoundary.cpp" + "${LIBRARY_DIR}/src/parser/ConstComponentVisitor.cpp" + "${LIBRARY_DIR}/src/parser/control_verbs.cpp" + "${LIBRARY_DIR}/src/parser/logical_combination.cpp" + "${LIBRARY_DIR}/src/parser/parse_error.cpp" + "${LIBRARY_DIR}/src/parser/parser_util.cpp" + "${LIBRARY_DIR}/src/parser/Parser.cpp" + "${LIBRARY_DIR}/src/parser/prefilter.cpp" + "${LIBRARY_DIR}/src/parser/shortcut_literal.cpp" + "${LIBRARY_DIR}/src/parser/ucp_table.cpp" + "${LIBRARY_DIR}/src/parser/unsupported.cpp" + "${LIBRARY_DIR}/src/parser/utf8_validate.cpp" + "${LIBRARY_DIR}/src/parser/Utf8ComponentClass.cpp" + "${LIBRARY_DIR}/src/rose/block.c" + "${LIBRARY_DIR}/src/rose/catchup.c" + "${LIBRARY_DIR}/src/rose/init.c" + "${LIBRARY_DIR}/src/rose/match.c" + "${LIBRARY_DIR}/src/rose/program_runtime.c" + "${LIBRARY_DIR}/src/rose/rose_build_add_mask.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_add.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_anchored.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_bytecode.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_castle.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_compile.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_convert.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_dedupe.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_engine_blob.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_exclusive.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_groups.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_infix.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_instructions.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_lit_accel.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_long_lit.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_lookaround.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_matchers.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_merge.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_misc.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_program.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_role_aliasing.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_scatter.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_width.cpp" + "${LIBRARY_DIR}/src/rose/rose_in_util.cpp" + "${LIBRARY_DIR}/src/rose/stream.c" + "${LIBRARY_DIR}/src/runtime.c" + "${LIBRARY_DIR}/src/scratch.c" + "${LIBRARY_DIR}/src/smallwrite/smallwrite_build.cpp" + "${LIBRARY_DIR}/src/som/slot_manager.cpp" + "${LIBRARY_DIR}/src/som/som_runtime.c" + "${LIBRARY_DIR}/src/som/som_stream.c" + 
"${LIBRARY_DIR}/src/stream_compress.c" + "${LIBRARY_DIR}/src/util/alloc.cpp" + "${LIBRARY_DIR}/src/util/charreach.cpp" + "${LIBRARY_DIR}/src/util/clique.cpp" + "${LIBRARY_DIR}/src/util/compile_context.cpp" + "${LIBRARY_DIR}/src/util/compile_error.cpp" + "${LIBRARY_DIR}/src/util/cpuid_flags.c" + "${LIBRARY_DIR}/src/util/depth.cpp" + "${LIBRARY_DIR}/src/util/fatbit_build.cpp" + "${LIBRARY_DIR}/src/util/multibit_build.cpp" + "${LIBRARY_DIR}/src/util/multibit.c" + "${LIBRARY_DIR}/src/util/report_manager.cpp" + "${LIBRARY_DIR}/src/util/simd_utils.c" + "${LIBRARY_DIR}/src/util/state_compress.c" + "${LIBRARY_DIR}/src/util/target_info.cpp" + "${LIBRARY_DIR}/src/util/ue2string.cpp" ) add_library (hyperscan ${SRCS}) @@ -259,9 +259,9 @@ if (NOT EXTERNAL_HYPERSCAN_LIBRARY_FOUND) target_include_directories (hyperscan PRIVATE common - ${LIBRARY_DIR}/include + "${LIBRARY_DIR}/include" ) - target_include_directories (hyperscan SYSTEM PUBLIC ${LIBRARY_DIR}/src) + target_include_directories (hyperscan SYSTEM PUBLIC "${LIBRARY_DIR}/src") if (ARCH_AMD64) target_include_directories (hyperscan PRIVATE x86_64) endif () diff --git a/contrib/icu-cmake/CMakeLists.txt b/contrib/icu-cmake/CMakeLists.txt index 884f5c3a336..26f3bb11006 100644 --- a/contrib/icu-cmake/CMakeLists.txt +++ b/contrib/icu-cmake/CMakeLists.txt @@ -1,447 +1,447 @@ -set(ICU_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/icu/icu4c/source) -set(ICUDATA_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/icudata/) +set(ICU_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icu/icu4c/source") +set(ICUDATA_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icudata/") set (CMAKE_CXX_STANDARD 17) # These lists of sources were generated from build log of the original ICU build system (configure + make). set(ICUUC_SOURCES -${ICU_SOURCE_DIR}/common/errorcode.cpp -${ICU_SOURCE_DIR}/common/putil.cpp -${ICU_SOURCE_DIR}/common/umath.cpp -${ICU_SOURCE_DIR}/common/utypes.cpp -${ICU_SOURCE_DIR}/common/uinvchar.cpp -${ICU_SOURCE_DIR}/common/umutex.cpp -${ICU_SOURCE_DIR}/common/ucln_cmn.cpp -${ICU_SOURCE_DIR}/common/uinit.cpp -${ICU_SOURCE_DIR}/common/uobject.cpp -${ICU_SOURCE_DIR}/common/cmemory.cpp -${ICU_SOURCE_DIR}/common/charstr.cpp -${ICU_SOURCE_DIR}/common/cstr.cpp -${ICU_SOURCE_DIR}/common/udata.cpp -${ICU_SOURCE_DIR}/common/ucmndata.cpp -${ICU_SOURCE_DIR}/common/udatamem.cpp -${ICU_SOURCE_DIR}/common/umapfile.cpp -${ICU_SOURCE_DIR}/common/udataswp.cpp -${ICU_SOURCE_DIR}/common/utrie_swap.cpp -${ICU_SOURCE_DIR}/common/ucol_swp.cpp -${ICU_SOURCE_DIR}/common/utrace.cpp -${ICU_SOURCE_DIR}/common/uhash.cpp -${ICU_SOURCE_DIR}/common/uhash_us.cpp -${ICU_SOURCE_DIR}/common/uenum.cpp -${ICU_SOURCE_DIR}/common/ustrenum.cpp -${ICU_SOURCE_DIR}/common/uvector.cpp -${ICU_SOURCE_DIR}/common/ustack.cpp -${ICU_SOURCE_DIR}/common/uvectr32.cpp -${ICU_SOURCE_DIR}/common/uvectr64.cpp -${ICU_SOURCE_DIR}/common/ucnv.cpp -${ICU_SOURCE_DIR}/common/ucnv_bld.cpp -${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp -${ICU_SOURCE_DIR}/common/ucnv_io.cpp -${ICU_SOURCE_DIR}/common/ucnv_cb.cpp -${ICU_SOURCE_DIR}/common/ucnv_err.cpp -${ICU_SOURCE_DIR}/common/ucnvlat1.cpp -${ICU_SOURCE_DIR}/common/ucnv_u7.cpp -${ICU_SOURCE_DIR}/common/ucnv_u8.cpp -${ICU_SOURCE_DIR}/common/ucnv_u16.cpp -${ICU_SOURCE_DIR}/common/ucnv_u32.cpp -${ICU_SOURCE_DIR}/common/ucnvscsu.cpp -${ICU_SOURCE_DIR}/common/ucnvbocu.cpp -${ICU_SOURCE_DIR}/common/ucnv_ext.cpp -${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp -${ICU_SOURCE_DIR}/common/ucnv2022.cpp -${ICU_SOURCE_DIR}/common/ucnvhz.cpp -${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp 
-${ICU_SOURCE_DIR}/common/ucnvisci.cpp -${ICU_SOURCE_DIR}/common/ucnvdisp.cpp -${ICU_SOURCE_DIR}/common/ucnv_set.cpp -${ICU_SOURCE_DIR}/common/ucnv_ct.cpp -${ICU_SOURCE_DIR}/common/resource.cpp -${ICU_SOURCE_DIR}/common/uresbund.cpp -${ICU_SOURCE_DIR}/common/ures_cnv.cpp -${ICU_SOURCE_DIR}/common/uresdata.cpp -${ICU_SOURCE_DIR}/common/resbund.cpp -${ICU_SOURCE_DIR}/common/resbund_cnv.cpp -${ICU_SOURCE_DIR}/common/ucurr.cpp -${ICU_SOURCE_DIR}/common/localebuilder.cpp -${ICU_SOURCE_DIR}/common/localeprioritylist.cpp -${ICU_SOURCE_DIR}/common/messagepattern.cpp -${ICU_SOURCE_DIR}/common/ucat.cpp -${ICU_SOURCE_DIR}/common/locmap.cpp -${ICU_SOURCE_DIR}/common/uloc.cpp -${ICU_SOURCE_DIR}/common/locid.cpp -${ICU_SOURCE_DIR}/common/locutil.cpp -${ICU_SOURCE_DIR}/common/locavailable.cpp -${ICU_SOURCE_DIR}/common/locdispnames.cpp -${ICU_SOURCE_DIR}/common/locdspnm.cpp -${ICU_SOURCE_DIR}/common/loclikely.cpp -${ICU_SOURCE_DIR}/common/locresdata.cpp -${ICU_SOURCE_DIR}/common/lsr.cpp -${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp -${ICU_SOURCE_DIR}/common/locdistance.cpp -${ICU_SOURCE_DIR}/common/localematcher.cpp -${ICU_SOURCE_DIR}/common/bytestream.cpp -${ICU_SOURCE_DIR}/common/stringpiece.cpp -${ICU_SOURCE_DIR}/common/bytesinkutil.cpp -${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp -${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp -${ICU_SOURCE_DIR}/common/bytestrie.cpp -${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp -${ICU_SOURCE_DIR}/common/ucharstrie.cpp -${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp -${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp -${ICU_SOURCE_DIR}/common/dictionarydata.cpp -${ICU_SOURCE_DIR}/common/edits.cpp -${ICU_SOURCE_DIR}/common/appendable.cpp -${ICU_SOURCE_DIR}/common/ustr_cnv.cpp -${ICU_SOURCE_DIR}/common/unistr_cnv.cpp -${ICU_SOURCE_DIR}/common/unistr.cpp -${ICU_SOURCE_DIR}/common/unistr_case.cpp -${ICU_SOURCE_DIR}/common/unistr_props.cpp -${ICU_SOURCE_DIR}/common/utf_impl.cpp -${ICU_SOURCE_DIR}/common/ustring.cpp -${ICU_SOURCE_DIR}/common/ustrcase.cpp -${ICU_SOURCE_DIR}/common/ucasemap.cpp -${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp -${ICU_SOURCE_DIR}/common/cstring.cpp -${ICU_SOURCE_DIR}/common/ustrfmt.cpp -${ICU_SOURCE_DIR}/common/ustrtrns.cpp -${ICU_SOURCE_DIR}/common/ustr_wcs.cpp -${ICU_SOURCE_DIR}/common/utext.cpp -${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp -${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp -${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp -${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp -${ICU_SOURCE_DIR}/common/normalizer2impl.cpp -${ICU_SOURCE_DIR}/common/normalizer2.cpp -${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp -${ICU_SOURCE_DIR}/common/normlzr.cpp -${ICU_SOURCE_DIR}/common/unorm.cpp -${ICU_SOURCE_DIR}/common/unormcmp.cpp -${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp -${ICU_SOURCE_DIR}/common/chariter.cpp -${ICU_SOURCE_DIR}/common/schriter.cpp -${ICU_SOURCE_DIR}/common/uchriter.cpp -${ICU_SOURCE_DIR}/common/uiter.cpp -${ICU_SOURCE_DIR}/common/patternprops.cpp -${ICU_SOURCE_DIR}/common/uchar.cpp -${ICU_SOURCE_DIR}/common/uprops.cpp -${ICU_SOURCE_DIR}/common/ucase.cpp -${ICU_SOURCE_DIR}/common/propname.cpp -${ICU_SOURCE_DIR}/common/ubidi_props.cpp -${ICU_SOURCE_DIR}/common/characterproperties.cpp -${ICU_SOURCE_DIR}/common/ubidi.cpp -${ICU_SOURCE_DIR}/common/ubidiwrt.cpp -${ICU_SOURCE_DIR}/common/ubidiln.cpp -${ICU_SOURCE_DIR}/common/ushape.cpp -${ICU_SOURCE_DIR}/common/uscript.cpp -${ICU_SOURCE_DIR}/common/uscript_props.cpp -${ICU_SOURCE_DIR}/common/usc_impl.cpp -${ICU_SOURCE_DIR}/common/unames.cpp 
-${ICU_SOURCE_DIR}/common/utrie.cpp -${ICU_SOURCE_DIR}/common/utrie2.cpp -${ICU_SOURCE_DIR}/common/utrie2_builder.cpp -${ICU_SOURCE_DIR}/common/ucptrie.cpp -${ICU_SOURCE_DIR}/common/umutablecptrie.cpp -${ICU_SOURCE_DIR}/common/bmpset.cpp -${ICU_SOURCE_DIR}/common/unisetspan.cpp -${ICU_SOURCE_DIR}/common/uset_props.cpp -${ICU_SOURCE_DIR}/common/uniset_props.cpp -${ICU_SOURCE_DIR}/common/uniset_closure.cpp -${ICU_SOURCE_DIR}/common/uset.cpp -${ICU_SOURCE_DIR}/common/uniset.cpp -${ICU_SOURCE_DIR}/common/usetiter.cpp -${ICU_SOURCE_DIR}/common/ruleiter.cpp -${ICU_SOURCE_DIR}/common/caniter.cpp -${ICU_SOURCE_DIR}/common/unifilt.cpp -${ICU_SOURCE_DIR}/common/unifunct.cpp -${ICU_SOURCE_DIR}/common/uarrsort.cpp -${ICU_SOURCE_DIR}/common/brkiter.cpp -${ICU_SOURCE_DIR}/common/ubrk.cpp -${ICU_SOURCE_DIR}/common/brkeng.cpp -${ICU_SOURCE_DIR}/common/dictbe.cpp -${ICU_SOURCE_DIR}/common/filteredbrk.cpp -${ICU_SOURCE_DIR}/common/rbbi.cpp -${ICU_SOURCE_DIR}/common/rbbidata.cpp -${ICU_SOURCE_DIR}/common/rbbinode.cpp -${ICU_SOURCE_DIR}/common/rbbirb.cpp -${ICU_SOURCE_DIR}/common/rbbiscan.cpp -${ICU_SOURCE_DIR}/common/rbbisetb.cpp -${ICU_SOURCE_DIR}/common/rbbistbl.cpp -${ICU_SOURCE_DIR}/common/rbbitblb.cpp -${ICU_SOURCE_DIR}/common/rbbi_cache.cpp -${ICU_SOURCE_DIR}/common/serv.cpp -${ICU_SOURCE_DIR}/common/servnotf.cpp -${ICU_SOURCE_DIR}/common/servls.cpp -${ICU_SOURCE_DIR}/common/servlk.cpp -${ICU_SOURCE_DIR}/common/servlkf.cpp -${ICU_SOURCE_DIR}/common/servrbf.cpp -${ICU_SOURCE_DIR}/common/servslkf.cpp -${ICU_SOURCE_DIR}/common/uidna.cpp -${ICU_SOURCE_DIR}/common/usprep.cpp -${ICU_SOURCE_DIR}/common/uts46.cpp -${ICU_SOURCE_DIR}/common/punycode.cpp -${ICU_SOURCE_DIR}/common/util.cpp -${ICU_SOURCE_DIR}/common/util_props.cpp -${ICU_SOURCE_DIR}/common/parsepos.cpp -${ICU_SOURCE_DIR}/common/locbased.cpp -${ICU_SOURCE_DIR}/common/cwchar.cpp -${ICU_SOURCE_DIR}/common/wintz.cpp -${ICU_SOURCE_DIR}/common/dtintrv.cpp -${ICU_SOURCE_DIR}/common/ucnvsel.cpp -${ICU_SOURCE_DIR}/common/propsvec.cpp -${ICU_SOURCE_DIR}/common/ulist.cpp -${ICU_SOURCE_DIR}/common/uloc_tag.cpp -${ICU_SOURCE_DIR}/common/icudataver.cpp -${ICU_SOURCE_DIR}/common/icuplug.cpp -${ICU_SOURCE_DIR}/common/sharedobject.cpp -${ICU_SOURCE_DIR}/common/simpleformatter.cpp -${ICU_SOURCE_DIR}/common/unifiedcache.cpp -${ICU_SOURCE_DIR}/common/uloc_keytype.cpp -${ICU_SOURCE_DIR}/common/ubiditransform.cpp -${ICU_SOURCE_DIR}/common/pluralmap.cpp -${ICU_SOURCE_DIR}/common/static_unicode_sets.cpp -${ICU_SOURCE_DIR}/common/restrace.cpp) +"${ICU_SOURCE_DIR}/common/errorcode.cpp" +"${ICU_SOURCE_DIR}/common/putil.cpp" +"${ICU_SOURCE_DIR}/common/umath.cpp" +"${ICU_SOURCE_DIR}/common/utypes.cpp" +"${ICU_SOURCE_DIR}/common/uinvchar.cpp" +"${ICU_SOURCE_DIR}/common/umutex.cpp" +"${ICU_SOURCE_DIR}/common/ucln_cmn.cpp" +"${ICU_SOURCE_DIR}/common/uinit.cpp" +"${ICU_SOURCE_DIR}/common/uobject.cpp" +"${ICU_SOURCE_DIR}/common/cmemory.cpp" +"${ICU_SOURCE_DIR}/common/charstr.cpp" +"${ICU_SOURCE_DIR}/common/cstr.cpp" +"${ICU_SOURCE_DIR}/common/udata.cpp" +"${ICU_SOURCE_DIR}/common/ucmndata.cpp" +"${ICU_SOURCE_DIR}/common/udatamem.cpp" +"${ICU_SOURCE_DIR}/common/umapfile.cpp" +"${ICU_SOURCE_DIR}/common/udataswp.cpp" +"${ICU_SOURCE_DIR}/common/utrie_swap.cpp" +"${ICU_SOURCE_DIR}/common/ucol_swp.cpp" +"${ICU_SOURCE_DIR}/common/utrace.cpp" +"${ICU_SOURCE_DIR}/common/uhash.cpp" +"${ICU_SOURCE_DIR}/common/uhash_us.cpp" +"${ICU_SOURCE_DIR}/common/uenum.cpp" +"${ICU_SOURCE_DIR}/common/ustrenum.cpp" +"${ICU_SOURCE_DIR}/common/uvector.cpp" +"${ICU_SOURCE_DIR}/common/ustack.cpp" 
+"${ICU_SOURCE_DIR}/common/uvectr32.cpp" +"${ICU_SOURCE_DIR}/common/uvectr64.cpp" +"${ICU_SOURCE_DIR}/common/ucnv.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_bld.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_io.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_cb.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_err.cpp" +"${ICU_SOURCE_DIR}/common/ucnvlat1.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_u7.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_u8.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_u16.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_u32.cpp" +"${ICU_SOURCE_DIR}/common/ucnvscsu.cpp" +"${ICU_SOURCE_DIR}/common/ucnvbocu.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_ext.cpp" +"${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp" +"${ICU_SOURCE_DIR}/common/ucnv2022.cpp" +"${ICU_SOURCE_DIR}/common/ucnvhz.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp" +"${ICU_SOURCE_DIR}/common/ucnvisci.cpp" +"${ICU_SOURCE_DIR}/common/ucnvdisp.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_set.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_ct.cpp" +"${ICU_SOURCE_DIR}/common/resource.cpp" +"${ICU_SOURCE_DIR}/common/uresbund.cpp" +"${ICU_SOURCE_DIR}/common/ures_cnv.cpp" +"${ICU_SOURCE_DIR}/common/uresdata.cpp" +"${ICU_SOURCE_DIR}/common/resbund.cpp" +"${ICU_SOURCE_DIR}/common/resbund_cnv.cpp" +"${ICU_SOURCE_DIR}/common/ucurr.cpp" +"${ICU_SOURCE_DIR}/common/localebuilder.cpp" +"${ICU_SOURCE_DIR}/common/localeprioritylist.cpp" +"${ICU_SOURCE_DIR}/common/messagepattern.cpp" +"${ICU_SOURCE_DIR}/common/ucat.cpp" +"${ICU_SOURCE_DIR}/common/locmap.cpp" +"${ICU_SOURCE_DIR}/common/uloc.cpp" +"${ICU_SOURCE_DIR}/common/locid.cpp" +"${ICU_SOURCE_DIR}/common/locutil.cpp" +"${ICU_SOURCE_DIR}/common/locavailable.cpp" +"${ICU_SOURCE_DIR}/common/locdispnames.cpp" +"${ICU_SOURCE_DIR}/common/locdspnm.cpp" +"${ICU_SOURCE_DIR}/common/loclikely.cpp" +"${ICU_SOURCE_DIR}/common/locresdata.cpp" +"${ICU_SOURCE_DIR}/common/lsr.cpp" +"${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp" +"${ICU_SOURCE_DIR}/common/locdistance.cpp" +"${ICU_SOURCE_DIR}/common/localematcher.cpp" +"${ICU_SOURCE_DIR}/common/bytestream.cpp" +"${ICU_SOURCE_DIR}/common/stringpiece.cpp" +"${ICU_SOURCE_DIR}/common/bytesinkutil.cpp" +"${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp" +"${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp" +"${ICU_SOURCE_DIR}/common/bytestrie.cpp" +"${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp" +"${ICU_SOURCE_DIR}/common/ucharstrie.cpp" +"${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp" +"${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp" +"${ICU_SOURCE_DIR}/common/dictionarydata.cpp" +"${ICU_SOURCE_DIR}/common/edits.cpp" +"${ICU_SOURCE_DIR}/common/appendable.cpp" +"${ICU_SOURCE_DIR}/common/ustr_cnv.cpp" +"${ICU_SOURCE_DIR}/common/unistr_cnv.cpp" +"${ICU_SOURCE_DIR}/common/unistr.cpp" +"${ICU_SOURCE_DIR}/common/unistr_case.cpp" +"${ICU_SOURCE_DIR}/common/unistr_props.cpp" +"${ICU_SOURCE_DIR}/common/utf_impl.cpp" +"${ICU_SOURCE_DIR}/common/ustring.cpp" +"${ICU_SOURCE_DIR}/common/ustrcase.cpp" +"${ICU_SOURCE_DIR}/common/ucasemap.cpp" +"${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp" +"${ICU_SOURCE_DIR}/common/cstring.cpp" +"${ICU_SOURCE_DIR}/common/ustrfmt.cpp" +"${ICU_SOURCE_DIR}/common/ustrtrns.cpp" +"${ICU_SOURCE_DIR}/common/ustr_wcs.cpp" +"${ICU_SOURCE_DIR}/common/utext.cpp" +"${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp" +"${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp" +"${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp" +"${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp" +"${ICU_SOURCE_DIR}/common/normalizer2impl.cpp" +"${ICU_SOURCE_DIR}/common/normalizer2.cpp" 
+"${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp" +"${ICU_SOURCE_DIR}/common/normlzr.cpp" +"${ICU_SOURCE_DIR}/common/unorm.cpp" +"${ICU_SOURCE_DIR}/common/unormcmp.cpp" +"${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp" +"${ICU_SOURCE_DIR}/common/chariter.cpp" +"${ICU_SOURCE_DIR}/common/schriter.cpp" +"${ICU_SOURCE_DIR}/common/uchriter.cpp" +"${ICU_SOURCE_DIR}/common/uiter.cpp" +"${ICU_SOURCE_DIR}/common/patternprops.cpp" +"${ICU_SOURCE_DIR}/common/uchar.cpp" +"${ICU_SOURCE_DIR}/common/uprops.cpp" +"${ICU_SOURCE_DIR}/common/ucase.cpp" +"${ICU_SOURCE_DIR}/common/propname.cpp" +"${ICU_SOURCE_DIR}/common/ubidi_props.cpp" +"${ICU_SOURCE_DIR}/common/characterproperties.cpp" +"${ICU_SOURCE_DIR}/common/ubidi.cpp" +"${ICU_SOURCE_DIR}/common/ubidiwrt.cpp" +"${ICU_SOURCE_DIR}/common/ubidiln.cpp" +"${ICU_SOURCE_DIR}/common/ushape.cpp" +"${ICU_SOURCE_DIR}/common/uscript.cpp" +"${ICU_SOURCE_DIR}/common/uscript_props.cpp" +"${ICU_SOURCE_DIR}/common/usc_impl.cpp" +"${ICU_SOURCE_DIR}/common/unames.cpp" +"${ICU_SOURCE_DIR}/common/utrie.cpp" +"${ICU_SOURCE_DIR}/common/utrie2.cpp" +"${ICU_SOURCE_DIR}/common/utrie2_builder.cpp" +"${ICU_SOURCE_DIR}/common/ucptrie.cpp" +"${ICU_SOURCE_DIR}/common/umutablecptrie.cpp" +"${ICU_SOURCE_DIR}/common/bmpset.cpp" +"${ICU_SOURCE_DIR}/common/unisetspan.cpp" +"${ICU_SOURCE_DIR}/common/uset_props.cpp" +"${ICU_SOURCE_DIR}/common/uniset_props.cpp" +"${ICU_SOURCE_DIR}/common/uniset_closure.cpp" +"${ICU_SOURCE_DIR}/common/uset.cpp" +"${ICU_SOURCE_DIR}/common/uniset.cpp" +"${ICU_SOURCE_DIR}/common/usetiter.cpp" +"${ICU_SOURCE_DIR}/common/ruleiter.cpp" +"${ICU_SOURCE_DIR}/common/caniter.cpp" +"${ICU_SOURCE_DIR}/common/unifilt.cpp" +"${ICU_SOURCE_DIR}/common/unifunct.cpp" +"${ICU_SOURCE_DIR}/common/uarrsort.cpp" +"${ICU_SOURCE_DIR}/common/brkiter.cpp" +"${ICU_SOURCE_DIR}/common/ubrk.cpp" +"${ICU_SOURCE_DIR}/common/brkeng.cpp" +"${ICU_SOURCE_DIR}/common/dictbe.cpp" +"${ICU_SOURCE_DIR}/common/filteredbrk.cpp" +"${ICU_SOURCE_DIR}/common/rbbi.cpp" +"${ICU_SOURCE_DIR}/common/rbbidata.cpp" +"${ICU_SOURCE_DIR}/common/rbbinode.cpp" +"${ICU_SOURCE_DIR}/common/rbbirb.cpp" +"${ICU_SOURCE_DIR}/common/rbbiscan.cpp" +"${ICU_SOURCE_DIR}/common/rbbisetb.cpp" +"${ICU_SOURCE_DIR}/common/rbbistbl.cpp" +"${ICU_SOURCE_DIR}/common/rbbitblb.cpp" +"${ICU_SOURCE_DIR}/common/rbbi_cache.cpp" +"${ICU_SOURCE_DIR}/common/serv.cpp" +"${ICU_SOURCE_DIR}/common/servnotf.cpp" +"${ICU_SOURCE_DIR}/common/servls.cpp" +"${ICU_SOURCE_DIR}/common/servlk.cpp" +"${ICU_SOURCE_DIR}/common/servlkf.cpp" +"${ICU_SOURCE_DIR}/common/servrbf.cpp" +"${ICU_SOURCE_DIR}/common/servslkf.cpp" +"${ICU_SOURCE_DIR}/common/uidna.cpp" +"${ICU_SOURCE_DIR}/common/usprep.cpp" +"${ICU_SOURCE_DIR}/common/uts46.cpp" +"${ICU_SOURCE_DIR}/common/punycode.cpp" +"${ICU_SOURCE_DIR}/common/util.cpp" +"${ICU_SOURCE_DIR}/common/util_props.cpp" +"${ICU_SOURCE_DIR}/common/parsepos.cpp" +"${ICU_SOURCE_DIR}/common/locbased.cpp" +"${ICU_SOURCE_DIR}/common/cwchar.cpp" +"${ICU_SOURCE_DIR}/common/wintz.cpp" +"${ICU_SOURCE_DIR}/common/dtintrv.cpp" +"${ICU_SOURCE_DIR}/common/ucnvsel.cpp" +"${ICU_SOURCE_DIR}/common/propsvec.cpp" +"${ICU_SOURCE_DIR}/common/ulist.cpp" +"${ICU_SOURCE_DIR}/common/uloc_tag.cpp" +"${ICU_SOURCE_DIR}/common/icudataver.cpp" +"${ICU_SOURCE_DIR}/common/icuplug.cpp" +"${ICU_SOURCE_DIR}/common/sharedobject.cpp" +"${ICU_SOURCE_DIR}/common/simpleformatter.cpp" +"${ICU_SOURCE_DIR}/common/unifiedcache.cpp" +"${ICU_SOURCE_DIR}/common/uloc_keytype.cpp" +"${ICU_SOURCE_DIR}/common/ubiditransform.cpp" +"${ICU_SOURCE_DIR}/common/pluralmap.cpp" 
+"${ICU_SOURCE_DIR}/common/static_unicode_sets.cpp" +"${ICU_SOURCE_DIR}/common/restrace.cpp") set(ICUI18N_SOURCES -${ICU_SOURCE_DIR}/i18n/ucln_in.cpp -${ICU_SOURCE_DIR}/i18n/fmtable.cpp -${ICU_SOURCE_DIR}/i18n/format.cpp -${ICU_SOURCE_DIR}/i18n/msgfmt.cpp -${ICU_SOURCE_DIR}/i18n/umsg.cpp -${ICU_SOURCE_DIR}/i18n/numfmt.cpp -${ICU_SOURCE_DIR}/i18n/unum.cpp -${ICU_SOURCE_DIR}/i18n/decimfmt.cpp -${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp -${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp -${ICU_SOURCE_DIR}/i18n/choicfmt.cpp -${ICU_SOURCE_DIR}/i18n/datefmt.cpp -${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp -${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp -${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp -${ICU_SOURCE_DIR}/i18n/udat.cpp -${ICU_SOURCE_DIR}/i18n/dtptngen.cpp -${ICU_SOURCE_DIR}/i18n/udatpg.cpp -${ICU_SOURCE_DIR}/i18n/nfrs.cpp -${ICU_SOURCE_DIR}/i18n/nfrule.cpp -${ICU_SOURCE_DIR}/i18n/nfsubs.cpp -${ICU_SOURCE_DIR}/i18n/rbnf.cpp -${ICU_SOURCE_DIR}/i18n/numsys.cpp -${ICU_SOURCE_DIR}/i18n/unumsys.cpp -${ICU_SOURCE_DIR}/i18n/ucsdet.cpp -${ICU_SOURCE_DIR}/i18n/ucal.cpp -${ICU_SOURCE_DIR}/i18n/calendar.cpp -${ICU_SOURCE_DIR}/i18n/gregocal.cpp -${ICU_SOURCE_DIR}/i18n/timezone.cpp -${ICU_SOURCE_DIR}/i18n/simpletz.cpp -${ICU_SOURCE_DIR}/i18n/olsontz.cpp -${ICU_SOURCE_DIR}/i18n/astro.cpp -${ICU_SOURCE_DIR}/i18n/taiwncal.cpp -${ICU_SOURCE_DIR}/i18n/buddhcal.cpp -${ICU_SOURCE_DIR}/i18n/persncal.cpp -${ICU_SOURCE_DIR}/i18n/islamcal.cpp -${ICU_SOURCE_DIR}/i18n/japancal.cpp -${ICU_SOURCE_DIR}/i18n/gregoimp.cpp -${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp -${ICU_SOURCE_DIR}/i18n/indiancal.cpp -${ICU_SOURCE_DIR}/i18n/chnsecal.cpp -${ICU_SOURCE_DIR}/i18n/cecal.cpp -${ICU_SOURCE_DIR}/i18n/coptccal.cpp -${ICU_SOURCE_DIR}/i18n/dangical.cpp -${ICU_SOURCE_DIR}/i18n/ethpccal.cpp -${ICU_SOURCE_DIR}/i18n/coleitr.cpp -${ICU_SOURCE_DIR}/i18n/coll.cpp -${ICU_SOURCE_DIR}/i18n/sortkey.cpp -${ICU_SOURCE_DIR}/i18n/bocsu.cpp -${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp -${ICU_SOURCE_DIR}/i18n/ucol.cpp -${ICU_SOURCE_DIR}/i18n/ucol_res.cpp -${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp -${ICU_SOURCE_DIR}/i18n/collation.cpp -${ICU_SOURCE_DIR}/i18n/collationsettings.cpp -${ICU_SOURCE_DIR}/i18n/collationdata.cpp -${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp -${ICU_SOURCE_DIR}/i18n/collationdatareader.cpp -${ICU_SOURCE_DIR}/i18n/collationdatawriter.cpp -${ICU_SOURCE_DIR}/i18n/collationfcd.cpp -${ICU_SOURCE_DIR}/i18n/collationiterator.cpp -${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp -${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp -${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp -${ICU_SOURCE_DIR}/i18n/collationsets.cpp -${ICU_SOURCE_DIR}/i18n/collationcompare.cpp -${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp -${ICU_SOURCE_DIR}/i18n/collationkeys.cpp -${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp -${ICU_SOURCE_DIR}/i18n/collationroot.cpp -${ICU_SOURCE_DIR}/i18n/collationrootelements.cpp -${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp -${ICU_SOURCE_DIR}/i18n/collationweights.cpp -${ICU_SOURCE_DIR}/i18n/collationruleparser.cpp -${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp -${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp -${ICU_SOURCE_DIR}/i18n/listformatter.cpp -${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp -${ICU_SOURCE_DIR}/i18n/strmatch.cpp -${ICU_SOURCE_DIR}/i18n/usearch.cpp -${ICU_SOURCE_DIR}/i18n/search.cpp -${ICU_SOURCE_DIR}/i18n/stsearch.cpp -${ICU_SOURCE_DIR}/i18n/translit.cpp -${ICU_SOURCE_DIR}/i18n/utrans.cpp -${ICU_SOURCE_DIR}/i18n/esctrn.cpp -${ICU_SOURCE_DIR}/i18n/unesctrn.cpp -${ICU_SOURCE_DIR}/i18n/funcrepl.cpp -${ICU_SOURCE_DIR}/i18n/strrepl.cpp 
-${ICU_SOURCE_DIR}/i18n/tridpars.cpp -${ICU_SOURCE_DIR}/i18n/cpdtrans.cpp -${ICU_SOURCE_DIR}/i18n/rbt.cpp -${ICU_SOURCE_DIR}/i18n/rbt_data.cpp -${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp -${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp -${ICU_SOURCE_DIR}/i18n/rbt_set.cpp -${ICU_SOURCE_DIR}/i18n/nultrans.cpp -${ICU_SOURCE_DIR}/i18n/remtrans.cpp -${ICU_SOURCE_DIR}/i18n/casetrn.cpp -${ICU_SOURCE_DIR}/i18n/titletrn.cpp -${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp -${ICU_SOURCE_DIR}/i18n/toupptrn.cpp -${ICU_SOURCE_DIR}/i18n/anytrans.cpp -${ICU_SOURCE_DIR}/i18n/name2uni.cpp -${ICU_SOURCE_DIR}/i18n/uni2name.cpp -${ICU_SOURCE_DIR}/i18n/nortrans.cpp -${ICU_SOURCE_DIR}/i18n/quant.cpp -${ICU_SOURCE_DIR}/i18n/transreg.cpp -${ICU_SOURCE_DIR}/i18n/brktrans.cpp -${ICU_SOURCE_DIR}/i18n/regexcmp.cpp -${ICU_SOURCE_DIR}/i18n/rematch.cpp -${ICU_SOURCE_DIR}/i18n/repattrn.cpp -${ICU_SOURCE_DIR}/i18n/regexst.cpp -${ICU_SOURCE_DIR}/i18n/regextxt.cpp -${ICU_SOURCE_DIR}/i18n/regeximp.cpp -${ICU_SOURCE_DIR}/i18n/uregex.cpp -${ICU_SOURCE_DIR}/i18n/uregexc.cpp -${ICU_SOURCE_DIR}/i18n/ulocdata.cpp -${ICU_SOURCE_DIR}/i18n/measfmt.cpp -${ICU_SOURCE_DIR}/i18n/currfmt.cpp -${ICU_SOURCE_DIR}/i18n/curramt.cpp -${ICU_SOURCE_DIR}/i18n/currunit.cpp -${ICU_SOURCE_DIR}/i18n/measure.cpp -${ICU_SOURCE_DIR}/i18n/utmscale.cpp -${ICU_SOURCE_DIR}/i18n/csdetect.cpp -${ICU_SOURCE_DIR}/i18n/csmatch.cpp -${ICU_SOURCE_DIR}/i18n/csr2022.cpp -${ICU_SOURCE_DIR}/i18n/csrecog.cpp -${ICU_SOURCE_DIR}/i18n/csrmbcs.cpp -${ICU_SOURCE_DIR}/i18n/csrsbcs.cpp -${ICU_SOURCE_DIR}/i18n/csrucode.cpp -${ICU_SOURCE_DIR}/i18n/csrutf8.cpp -${ICU_SOURCE_DIR}/i18n/inputext.cpp -${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp -${ICU_SOURCE_DIR}/i18n/windtfmt.cpp -${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp -${ICU_SOURCE_DIR}/i18n/basictz.cpp -${ICU_SOURCE_DIR}/i18n/dtrule.cpp -${ICU_SOURCE_DIR}/i18n/rbtz.cpp -${ICU_SOURCE_DIR}/i18n/tzrule.cpp -${ICU_SOURCE_DIR}/i18n/tztrans.cpp -${ICU_SOURCE_DIR}/i18n/vtzone.cpp -${ICU_SOURCE_DIR}/i18n/zonemeta.cpp -${ICU_SOURCE_DIR}/i18n/standardplural.cpp -${ICU_SOURCE_DIR}/i18n/upluralrules.cpp -${ICU_SOURCE_DIR}/i18n/plurrule.cpp -${ICU_SOURCE_DIR}/i18n/plurfmt.cpp -${ICU_SOURCE_DIR}/i18n/selfmt.cpp -${ICU_SOURCE_DIR}/i18n/dtitvfmt.cpp -${ICU_SOURCE_DIR}/i18n/dtitvinf.cpp -${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp -${ICU_SOURCE_DIR}/i18n/tmunit.cpp -${ICU_SOURCE_DIR}/i18n/tmutamt.cpp -${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp -${ICU_SOURCE_DIR}/i18n/currpinf.cpp -${ICU_SOURCE_DIR}/i18n/uspoof.cpp -${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp -${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp -${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp -${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp -${ICU_SOURCE_DIR}/i18n/ztrans.cpp -${ICU_SOURCE_DIR}/i18n/zrule.cpp -${ICU_SOURCE_DIR}/i18n/vzone.cpp -${ICU_SOURCE_DIR}/i18n/fphdlimp.cpp -${ICU_SOURCE_DIR}/i18n/fpositer.cpp -${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp -${ICU_SOURCE_DIR}/i18n/decNumber.cpp -${ICU_SOURCE_DIR}/i18n/decContext.cpp -${ICU_SOURCE_DIR}/i18n/alphaindex.cpp -${ICU_SOURCE_DIR}/i18n/tznames.cpp -${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp -${ICU_SOURCE_DIR}/i18n/tzgnames.cpp -${ICU_SOURCE_DIR}/i18n/tzfmt.cpp -${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp -${ICU_SOURCE_DIR}/i18n/gender.cpp -${ICU_SOURCE_DIR}/i18n/region.cpp -${ICU_SOURCE_DIR}/i18n/scriptset.cpp -${ICU_SOURCE_DIR}/i18n/uregion.cpp -${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp -${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp -${ICU_SOURCE_DIR}/i18n/measunit.cpp -${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp -${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp 
-${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp -${ICU_SOURCE_DIR}/i18n/nounit.cpp -${ICU_SOURCE_DIR}/i18n/number_affixutils.cpp -${ICU_SOURCE_DIR}/i18n/number_compact.cpp -${ICU_SOURCE_DIR}/i18n/number_decimalquantity.cpp -${ICU_SOURCE_DIR}/i18n/number_decimfmtprops.cpp -${ICU_SOURCE_DIR}/i18n/number_fluent.cpp -${ICU_SOURCE_DIR}/i18n/number_formatimpl.cpp -${ICU_SOURCE_DIR}/i18n/number_grouping.cpp -${ICU_SOURCE_DIR}/i18n/number_integerwidth.cpp -${ICU_SOURCE_DIR}/i18n/number_longnames.cpp -${ICU_SOURCE_DIR}/i18n/number_modifiers.cpp -${ICU_SOURCE_DIR}/i18n/number_notation.cpp -${ICU_SOURCE_DIR}/i18n/number_output.cpp -${ICU_SOURCE_DIR}/i18n/number_padding.cpp -${ICU_SOURCE_DIR}/i18n/number_patternmodifier.cpp -${ICU_SOURCE_DIR}/i18n/number_patternstring.cpp -${ICU_SOURCE_DIR}/i18n/number_rounding.cpp -${ICU_SOURCE_DIR}/i18n/number_scientific.cpp -${ICU_SOURCE_DIR}/i18n/number_utils.cpp -${ICU_SOURCE_DIR}/i18n/number_asformat.cpp -${ICU_SOURCE_DIR}/i18n/number_mapper.cpp -${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp -${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp -${ICU_SOURCE_DIR}/i18n/number_skeletons.cpp -${ICU_SOURCE_DIR}/i18n/number_capi.cpp -${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp -${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp -${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp -${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp -${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp -${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp -${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp -${ICU_SOURCE_DIR}/i18n/string_segment.cpp -${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp -${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp -${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp -${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp -${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp -${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp -${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp -${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp -${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp -${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp -${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp -${ICU_SOURCE_DIR}/i18n/erarules.cpp -${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp -${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp -${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp -${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp) +"${ICU_SOURCE_DIR}/i18n/ucln_in.cpp" +"${ICU_SOURCE_DIR}/i18n/fmtable.cpp" +"${ICU_SOURCE_DIR}/i18n/format.cpp" +"${ICU_SOURCE_DIR}/i18n/msgfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/umsg.cpp" +"${ICU_SOURCE_DIR}/i18n/numfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/unum.cpp" +"${ICU_SOURCE_DIR}/i18n/decimfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp" +"${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp" +"${ICU_SOURCE_DIR}/i18n/choicfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/datefmt.cpp" +"${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp" +"${ICU_SOURCE_DIR}/i18n/udat.cpp" +"${ICU_SOURCE_DIR}/i18n/dtptngen.cpp" +"${ICU_SOURCE_DIR}/i18n/udatpg.cpp" +"${ICU_SOURCE_DIR}/i18n/nfrs.cpp" +"${ICU_SOURCE_DIR}/i18n/nfrule.cpp" +"${ICU_SOURCE_DIR}/i18n/nfsubs.cpp" +"${ICU_SOURCE_DIR}/i18n/rbnf.cpp" +"${ICU_SOURCE_DIR}/i18n/numsys.cpp" +"${ICU_SOURCE_DIR}/i18n/unumsys.cpp" +"${ICU_SOURCE_DIR}/i18n/ucsdet.cpp" +"${ICU_SOURCE_DIR}/i18n/ucal.cpp" +"${ICU_SOURCE_DIR}/i18n/calendar.cpp" +"${ICU_SOURCE_DIR}/i18n/gregocal.cpp" +"${ICU_SOURCE_DIR}/i18n/timezone.cpp" +"${ICU_SOURCE_DIR}/i18n/simpletz.cpp" +"${ICU_SOURCE_DIR}/i18n/olsontz.cpp" +"${ICU_SOURCE_DIR}/i18n/astro.cpp" 
+"${ICU_SOURCE_DIR}/i18n/taiwncal.cpp" +"${ICU_SOURCE_DIR}/i18n/buddhcal.cpp" +"${ICU_SOURCE_DIR}/i18n/persncal.cpp" +"${ICU_SOURCE_DIR}/i18n/islamcal.cpp" +"${ICU_SOURCE_DIR}/i18n/japancal.cpp" +"${ICU_SOURCE_DIR}/i18n/gregoimp.cpp" +"${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp" +"${ICU_SOURCE_DIR}/i18n/indiancal.cpp" +"${ICU_SOURCE_DIR}/i18n/chnsecal.cpp" +"${ICU_SOURCE_DIR}/i18n/cecal.cpp" +"${ICU_SOURCE_DIR}/i18n/coptccal.cpp" +"${ICU_SOURCE_DIR}/i18n/dangical.cpp" +"${ICU_SOURCE_DIR}/i18n/ethpccal.cpp" +"${ICU_SOURCE_DIR}/i18n/coleitr.cpp" +"${ICU_SOURCE_DIR}/i18n/coll.cpp" +"${ICU_SOURCE_DIR}/i18n/sortkey.cpp" +"${ICU_SOURCE_DIR}/i18n/bocsu.cpp" +"${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp" +"${ICU_SOURCE_DIR}/i18n/ucol.cpp" +"${ICU_SOURCE_DIR}/i18n/ucol_res.cpp" +"${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp" +"${ICU_SOURCE_DIR}/i18n/collation.cpp" +"${ICU_SOURCE_DIR}/i18n/collationsettings.cpp" +"${ICU_SOURCE_DIR}/i18n/collationdata.cpp" +"${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp" +"${ICU_SOURCE_DIR}/i18n/collationdatareader.cpp" +"${ICU_SOURCE_DIR}/i18n/collationdatawriter.cpp" +"${ICU_SOURCE_DIR}/i18n/collationfcd.cpp" +"${ICU_SOURCE_DIR}/i18n/collationiterator.cpp" +"${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp" +"${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp" +"${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp" +"${ICU_SOURCE_DIR}/i18n/collationsets.cpp" +"${ICU_SOURCE_DIR}/i18n/collationcompare.cpp" +"${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp" +"${ICU_SOURCE_DIR}/i18n/collationkeys.cpp" +"${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp" +"${ICU_SOURCE_DIR}/i18n/collationroot.cpp" +"${ICU_SOURCE_DIR}/i18n/collationrootelements.cpp" +"${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp" +"${ICU_SOURCE_DIR}/i18n/collationweights.cpp" +"${ICU_SOURCE_DIR}/i18n/collationruleparser.cpp" +"${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp" +"${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp" +"${ICU_SOURCE_DIR}/i18n/listformatter.cpp" +"${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp" +"${ICU_SOURCE_DIR}/i18n/strmatch.cpp" +"${ICU_SOURCE_DIR}/i18n/usearch.cpp" +"${ICU_SOURCE_DIR}/i18n/search.cpp" +"${ICU_SOURCE_DIR}/i18n/stsearch.cpp" +"${ICU_SOURCE_DIR}/i18n/translit.cpp" +"${ICU_SOURCE_DIR}/i18n/utrans.cpp" +"${ICU_SOURCE_DIR}/i18n/esctrn.cpp" +"${ICU_SOURCE_DIR}/i18n/unesctrn.cpp" +"${ICU_SOURCE_DIR}/i18n/funcrepl.cpp" +"${ICU_SOURCE_DIR}/i18n/strrepl.cpp" +"${ICU_SOURCE_DIR}/i18n/tridpars.cpp" +"${ICU_SOURCE_DIR}/i18n/cpdtrans.cpp" +"${ICU_SOURCE_DIR}/i18n/rbt.cpp" +"${ICU_SOURCE_DIR}/i18n/rbt_data.cpp" +"${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp" +"${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp" +"${ICU_SOURCE_DIR}/i18n/rbt_set.cpp" +"${ICU_SOURCE_DIR}/i18n/nultrans.cpp" +"${ICU_SOURCE_DIR}/i18n/remtrans.cpp" +"${ICU_SOURCE_DIR}/i18n/casetrn.cpp" +"${ICU_SOURCE_DIR}/i18n/titletrn.cpp" +"${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp" +"${ICU_SOURCE_DIR}/i18n/toupptrn.cpp" +"${ICU_SOURCE_DIR}/i18n/anytrans.cpp" +"${ICU_SOURCE_DIR}/i18n/name2uni.cpp" +"${ICU_SOURCE_DIR}/i18n/uni2name.cpp" +"${ICU_SOURCE_DIR}/i18n/nortrans.cpp" +"${ICU_SOURCE_DIR}/i18n/quant.cpp" +"${ICU_SOURCE_DIR}/i18n/transreg.cpp" +"${ICU_SOURCE_DIR}/i18n/brktrans.cpp" +"${ICU_SOURCE_DIR}/i18n/regexcmp.cpp" +"${ICU_SOURCE_DIR}/i18n/rematch.cpp" +"${ICU_SOURCE_DIR}/i18n/repattrn.cpp" +"${ICU_SOURCE_DIR}/i18n/regexst.cpp" +"${ICU_SOURCE_DIR}/i18n/regextxt.cpp" +"${ICU_SOURCE_DIR}/i18n/regeximp.cpp" +"${ICU_SOURCE_DIR}/i18n/uregex.cpp" +"${ICU_SOURCE_DIR}/i18n/uregexc.cpp" +"${ICU_SOURCE_DIR}/i18n/ulocdata.cpp" +"${ICU_SOURCE_DIR}/i18n/measfmt.cpp" 
+"${ICU_SOURCE_DIR}/i18n/currfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/curramt.cpp" +"${ICU_SOURCE_DIR}/i18n/currunit.cpp" +"${ICU_SOURCE_DIR}/i18n/measure.cpp" +"${ICU_SOURCE_DIR}/i18n/utmscale.cpp" +"${ICU_SOURCE_DIR}/i18n/csdetect.cpp" +"${ICU_SOURCE_DIR}/i18n/csmatch.cpp" +"${ICU_SOURCE_DIR}/i18n/csr2022.cpp" +"${ICU_SOURCE_DIR}/i18n/csrecog.cpp" +"${ICU_SOURCE_DIR}/i18n/csrmbcs.cpp" +"${ICU_SOURCE_DIR}/i18n/csrsbcs.cpp" +"${ICU_SOURCE_DIR}/i18n/csrucode.cpp" +"${ICU_SOURCE_DIR}/i18n/csrutf8.cpp" +"${ICU_SOURCE_DIR}/i18n/inputext.cpp" +"${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp" +"${ICU_SOURCE_DIR}/i18n/windtfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/basictz.cpp" +"${ICU_SOURCE_DIR}/i18n/dtrule.cpp" +"${ICU_SOURCE_DIR}/i18n/rbtz.cpp" +"${ICU_SOURCE_DIR}/i18n/tzrule.cpp" +"${ICU_SOURCE_DIR}/i18n/tztrans.cpp" +"${ICU_SOURCE_DIR}/i18n/vtzone.cpp" +"${ICU_SOURCE_DIR}/i18n/zonemeta.cpp" +"${ICU_SOURCE_DIR}/i18n/standardplural.cpp" +"${ICU_SOURCE_DIR}/i18n/upluralrules.cpp" +"${ICU_SOURCE_DIR}/i18n/plurrule.cpp" +"${ICU_SOURCE_DIR}/i18n/plurfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/selfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/dtitvfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/dtitvinf.cpp" +"${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp" +"${ICU_SOURCE_DIR}/i18n/tmunit.cpp" +"${ICU_SOURCE_DIR}/i18n/tmutamt.cpp" +"${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/currpinf.cpp" +"${ICU_SOURCE_DIR}/i18n/uspoof.cpp" +"${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp" +"${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp" +"${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp" +"${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp" +"${ICU_SOURCE_DIR}/i18n/ztrans.cpp" +"${ICU_SOURCE_DIR}/i18n/zrule.cpp" +"${ICU_SOURCE_DIR}/i18n/vzone.cpp" +"${ICU_SOURCE_DIR}/i18n/fphdlimp.cpp" +"${ICU_SOURCE_DIR}/i18n/fpositer.cpp" +"${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp" +"${ICU_SOURCE_DIR}/i18n/decNumber.cpp" +"${ICU_SOURCE_DIR}/i18n/decContext.cpp" +"${ICU_SOURCE_DIR}/i18n/alphaindex.cpp" +"${ICU_SOURCE_DIR}/i18n/tznames.cpp" +"${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp" +"${ICU_SOURCE_DIR}/i18n/tzgnames.cpp" +"${ICU_SOURCE_DIR}/i18n/tzfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp" +"${ICU_SOURCE_DIR}/i18n/gender.cpp" +"${ICU_SOURCE_DIR}/i18n/region.cpp" +"${ICU_SOURCE_DIR}/i18n/scriptset.cpp" +"${ICU_SOURCE_DIR}/i18n/uregion.cpp" +"${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp" +"${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp" +"${ICU_SOURCE_DIR}/i18n/measunit.cpp" +"${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp" +"${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp" +"${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp" +"${ICU_SOURCE_DIR}/i18n/nounit.cpp" +"${ICU_SOURCE_DIR}/i18n/number_affixutils.cpp" +"${ICU_SOURCE_DIR}/i18n/number_compact.cpp" +"${ICU_SOURCE_DIR}/i18n/number_decimalquantity.cpp" +"${ICU_SOURCE_DIR}/i18n/number_decimfmtprops.cpp" +"${ICU_SOURCE_DIR}/i18n/number_fluent.cpp" +"${ICU_SOURCE_DIR}/i18n/number_formatimpl.cpp" +"${ICU_SOURCE_DIR}/i18n/number_grouping.cpp" +"${ICU_SOURCE_DIR}/i18n/number_integerwidth.cpp" +"${ICU_SOURCE_DIR}/i18n/number_longnames.cpp" +"${ICU_SOURCE_DIR}/i18n/number_modifiers.cpp" +"${ICU_SOURCE_DIR}/i18n/number_notation.cpp" +"${ICU_SOURCE_DIR}/i18n/number_output.cpp" +"${ICU_SOURCE_DIR}/i18n/number_padding.cpp" +"${ICU_SOURCE_DIR}/i18n/number_patternmodifier.cpp" +"${ICU_SOURCE_DIR}/i18n/number_patternstring.cpp" +"${ICU_SOURCE_DIR}/i18n/number_rounding.cpp" +"${ICU_SOURCE_DIR}/i18n/number_scientific.cpp" +"${ICU_SOURCE_DIR}/i18n/number_utils.cpp" +"${ICU_SOURCE_DIR}/i18n/number_asformat.cpp" 
+"${ICU_SOURCE_DIR}/i18n/number_mapper.cpp" +"${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp" +"${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp" +"${ICU_SOURCE_DIR}/i18n/number_skeletons.cpp" +"${ICU_SOURCE_DIR}/i18n/number_capi.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp" +"${ICU_SOURCE_DIR}/i18n/string_segment.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp" +"${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp" +"${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp" +"${ICU_SOURCE_DIR}/i18n/erarules.cpp" +"${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp" +"${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp" +"${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp" +"${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp") -file(GENERATE OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/empty.cpp CONTENT " ") +file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" CONTENT " ") enable_language(ASM) set(ICUDATA_SOURCES - ${ICUDATA_SOURCE_DIR}/icudt66l_dat.S - ${CMAKE_CURRENT_BINARY_DIR}/empty.cpp # Without this cmake can incorrectly detects library type (OBJECT) instead of SHARED/STATIC + "${ICUDATA_SOURCE_DIR}/icudt66l_dat.S" + "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" # Without this cmake can incorrectly detects library type (OBJECT) instead of SHARED/STATIC ) # Note that we don't like any kind of binary plugins (because of runtime dependencies, vulnerabilities, ABI incompatibilities). 
@@ -454,8 +454,8 @@ add_library(icudata ${ICUDATA_SOURCES}) target_link_libraries(icuuc PRIVATE icudata) target_link_libraries(icui18n PRIVATE icuuc) -target_include_directories(icuuc SYSTEM PUBLIC ${ICU_SOURCE_DIR}/common/) -target_include_directories(icui18n SYSTEM PUBLIC ${ICU_SOURCE_DIR}/i18n/) +target_include_directories(icuuc SYSTEM PUBLIC "${ICU_SOURCE_DIR}/common/") +target_include_directories(icui18n SYSTEM PUBLIC "${ICU_SOURCE_DIR}/i18n/") target_compile_definitions(icuuc PRIVATE -DU_COMMON_IMPLEMENTATION) target_compile_definitions(icui18n PRIVATE -DU_I18N_IMPLEMENTATION) diff --git a/contrib/jemalloc-cmake/CMakeLists.txt b/contrib/jemalloc-cmake/CMakeLists.txt index b8a6474413a..140b7eb370b 100644 --- a/contrib/jemalloc-cmake/CMakeLists.txt +++ b/contrib/jemalloc-cmake/CMakeLists.txt @@ -1,10 +1,13 @@ -if (SANITIZE OR NOT (ARCH_AMD64 OR ARCH_ARM) OR NOT (OS_LINUX OR OS_FREEBSD OR OS_DARWIN)) +if (SANITIZE OR NOT ( + ((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_ARM OR ARCH_PPC64LE)) OR + (OS_DARWIN AND CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo") +)) if (ENABLE_JEMALLOC) message (${RECONFIGURE_MESSAGE_LEVEL} - "jemalloc is disabled implicitly: it doesn't work with sanitizers and can only be used with x86_64 or aarch64 on linux or freebsd.") - endif() + "jemalloc is disabled implicitly: it doesn't work with sanitizers and can only be used with x86_64, aarch64, or ppc64le Linux or FreeBSD builds and RelWithDebInfo macOS builds.") + endif () set (ENABLE_JEMALLOC OFF) -else() +else () option (ENABLE_JEMALLOC "Enable jemalloc allocator" ${ENABLE_LIBRARIES}) endif () @@ -34,9 +37,9 @@ if (OS_LINUX) # avoid spurious latencies and additional work associated with # MADV_DONTNEED. See # https://github.com/ClickHouse/ClickHouse/issues/11121 for motivation. 
- set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:10000") + set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000") else() - set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:10000") + set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000") endif() # CACHE variable is empty, to allow changing defaults without necessity # to purge cache @@ -49,46 +52,46 @@ message (STATUS "jemalloc malloc_conf: ${JEMALLOC_CONFIG_MALLOC_CONF}") set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/jemalloc") set (SRCS - ${LIBRARY_DIR}/src/arena.c - ${LIBRARY_DIR}/src/background_thread.c - ${LIBRARY_DIR}/src/base.c - ${LIBRARY_DIR}/src/bin.c - ${LIBRARY_DIR}/src/bitmap.c - ${LIBRARY_DIR}/src/ckh.c - ${LIBRARY_DIR}/src/ctl.c - ${LIBRARY_DIR}/src/div.c - ${LIBRARY_DIR}/src/extent.c - ${LIBRARY_DIR}/src/extent_dss.c - ${LIBRARY_DIR}/src/extent_mmap.c - ${LIBRARY_DIR}/src/hash.c - ${LIBRARY_DIR}/src/hook.c - ${LIBRARY_DIR}/src/jemalloc.c - ${LIBRARY_DIR}/src/large.c - ${LIBRARY_DIR}/src/log.c - ${LIBRARY_DIR}/src/malloc_io.c - ${LIBRARY_DIR}/src/mutex.c - ${LIBRARY_DIR}/src/mutex_pool.c - ${LIBRARY_DIR}/src/nstime.c - ${LIBRARY_DIR}/src/pages.c - ${LIBRARY_DIR}/src/prng.c - ${LIBRARY_DIR}/src/prof.c - ${LIBRARY_DIR}/src/rtree.c - ${LIBRARY_DIR}/src/sc.c - ${LIBRARY_DIR}/src/stats.c - ${LIBRARY_DIR}/src/sz.c - ${LIBRARY_DIR}/src/tcache.c - ${LIBRARY_DIR}/src/test_hooks.c - ${LIBRARY_DIR}/src/ticker.c - ${LIBRARY_DIR}/src/tsd.c - ${LIBRARY_DIR}/src/witness.c - ${LIBRARY_DIR}/src/safety_check.c + "${LIBRARY_DIR}/src/arena.c" + "${LIBRARY_DIR}/src/background_thread.c" + "${LIBRARY_DIR}/src/base.c" + "${LIBRARY_DIR}/src/bin.c" + "${LIBRARY_DIR}/src/bitmap.c" + "${LIBRARY_DIR}/src/ckh.c" + "${LIBRARY_DIR}/src/ctl.c" + "${LIBRARY_DIR}/src/div.c" + "${LIBRARY_DIR}/src/extent.c" + "${LIBRARY_DIR}/src/extent_dss.c" + "${LIBRARY_DIR}/src/extent_mmap.c" + "${LIBRARY_DIR}/src/hash.c" + "${LIBRARY_DIR}/src/hook.c" + "${LIBRARY_DIR}/src/jemalloc.c" + "${LIBRARY_DIR}/src/large.c" + "${LIBRARY_DIR}/src/log.c" + "${LIBRARY_DIR}/src/malloc_io.c" + "${LIBRARY_DIR}/src/mutex.c" + "${LIBRARY_DIR}/src/mutex_pool.c" + "${LIBRARY_DIR}/src/nstime.c" + "${LIBRARY_DIR}/src/pages.c" + "${LIBRARY_DIR}/src/prng.c" + "${LIBRARY_DIR}/src/prof.c" + "${LIBRARY_DIR}/src/rtree.c" + "${LIBRARY_DIR}/src/sc.c" + "${LIBRARY_DIR}/src/stats.c" + "${LIBRARY_DIR}/src/sz.c" + "${LIBRARY_DIR}/src/tcache.c" + "${LIBRARY_DIR}/src/test_hooks.c" + "${LIBRARY_DIR}/src/ticker.c" + "${LIBRARY_DIR}/src/tsd.c" + "${LIBRARY_DIR}/src/witness.c" + "${LIBRARY_DIR}/src/safety_check.c" ) if (OS_DARWIN) - list(APPEND SRCS ${LIBRARY_DIR}/src/zone.c) + list(APPEND SRCS "${LIBRARY_DIR}/src/zone.c") endif () add_library(jemalloc ${SRCS}) -target_include_directories(jemalloc PRIVATE ${LIBRARY_DIR}/include) +target_include_directories(jemalloc PRIVATE "${LIBRARY_DIR}/include") target_include_directories(jemalloc SYSTEM PUBLIC include) set (JEMALLOC_INCLUDE_PREFIX) @@ -107,6 +110,8 @@ if (ARCH_AMD64) set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_x86_64") elseif (ARCH_ARM) set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_aarch64") +elseif (ARCH_PPC64LE) + set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_ppc64le") else () message (FATAL_ERROR "internal jemalloc: This arch is not supported") endif () @@ -114,17 +119,19 @@ endif () 
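The decay settings changed above halve `muzzy_decay_ms` from 10 s to 5 s and pin `dirty_decay_ms` to 5 s, so jemalloc returns unused pages to the OS sooner, and the adjacent "CACHE variable is empty" comment describes a common CMake pattern: keep the computed default in a normal variable and expose an initially empty CACHE entry so users can override it without purging `CMakeCache.txt`. A minimal sketch of that pattern with hypothetical variable names (not the file's actual spelling):

```cmake
# Sketch of an overridable default (hypothetical names). The computed default
# lives in a normal variable, so new releases can change it freely; the CACHE
# entry starts empty and only wins when the user sets it, e.g.
#   cmake -DMALLOC_CONF_OVERRIDE="muzzy_decay_ms:0" ..
set(MALLOC_CONF_DEFAULT "oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000")
set(MALLOC_CONF_OVERRIDE "" CACHE STRING "User override for jemalloc's baked-in malloc_conf")

if(MALLOC_CONF_OVERRIDE)
    set(JEMALLOC_MALLOC_CONF "${MALLOC_CONF_OVERRIDE}")
else()
    set(JEMALLOC_MALLOC_CONF "${MALLOC_CONF_DEFAULT}")
endif()
message(STATUS "jemalloc malloc_conf: ${JEMALLOC_MALLOC_CONF}")
```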
configure_file(${JEMALLOC_INCLUDE_PREFIX}/jemalloc/internal/jemalloc_internal_defs.h.in ${JEMALLOC_INCLUDE_PREFIX}/jemalloc/internal/jemalloc_internal_defs.h) target_include_directories(jemalloc SYSTEM PRIVATE - ${CMAKE_CURRENT_BINARY_DIR}/${JEMALLOC_INCLUDE_PREFIX}/jemalloc/internal) + "${CMAKE_CURRENT_BINARY_DIR}/${JEMALLOC_INCLUDE_PREFIX}/jemalloc/internal") target_compile_definitions(jemalloc PRIVATE -DJEMALLOC_NO_PRIVATE_NAMESPACE) if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG") - target_compile_definitions(jemalloc PRIVATE -DJEMALLOC_DEBUG=1 -DJEMALLOC_PROF=1) + target_compile_definitions(jemalloc PRIVATE -DJEMALLOC_DEBUG=1) +endif () - if (USE_UNWIND) - target_compile_definitions (jemalloc PRIVATE -DJEMALLOC_PROF_LIBUNWIND=1) - target_link_libraries (jemalloc PRIVATE unwind) - endif () +target_compile_definitions(jemalloc PRIVATE -DJEMALLOC_PROF=1) + +if (USE_UNWIND) + target_compile_definitions (jemalloc PRIVATE -DJEMALLOC_PROF_LIBUNWIND=1) + target_link_libraries (jemalloc PRIVATE unwind) endif () target_compile_options(jemalloc PRIVATE -Wno-redundant-decls) diff --git a/contrib/jemalloc-cmake/include_darwin_aarch64/jemalloc/internal/jemalloc_internal_defs.h.in b/contrib/jemalloc-cmake/include_darwin_aarch64/jemalloc/internal/jemalloc_internal_defs.h.in index c7c884d0eaa..5c0407db24a 100644 --- a/contrib/jemalloc-cmake/include_darwin_aarch64/jemalloc/internal/jemalloc_internal_defs.h.in +++ b/contrib/jemalloc-cmake/include_darwin_aarch64/jemalloc/internal/jemalloc_internal_defs.h.in @@ -42,7 +42,7 @@ * total number of bits in a pointer, e.g. on x64, for which the uppermost 16 * bits are the same as bit 47. */ -#define LG_VADDR 48 +#define LG_VADDR 64 /* Defined if C11 atomics are available. */ #define JEMALLOC_C11_ATOMICS 1 @@ -101,11 +101,6 @@ */ #define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1 -/* - * Defined if clock_gettime(CLOCK_REALTIME, ...) is available. - */ -#define JEMALLOC_HAVE_CLOCK_REALTIME 1 - /* * Defined if _malloc_thread_cleanup() exists. At least in the case of * FreeBSD, pthread_key_create() allocates, which if used during malloc @@ -181,14 +176,14 @@ /* #undef LG_QUANTUM */ /* One page is 2^LG_PAGE bytes. */ -#define LG_PAGE 16 +#define LG_PAGE 14 /* * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the * system does not explicitly support huge pages; system calls that require * explicit huge page support are separately configured. */ -#define LG_HUGEPAGE 29 +#define LG_HUGEPAGE 21 /* * If defined, adjacent virtual memory mappings with identical attributes @@ -356,7 +351,7 @@ /* #undef JEMALLOC_EXPORT */ /* config.malloc_conf options string. */ -#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@" +#define JEMALLOC_CONFIG_MALLOC_CONF "" /* If defined, jemalloc takes the malloc/free/etc. symbol names. */ /* #undef JEMALLOC_IS_MALLOC */ diff --git a/contrib/jemalloc-cmake/include_linux_ppc64le/jemalloc/internal/jemalloc_internal_defs.h.in b/contrib/jemalloc-cmake/include_linux_ppc64le/jemalloc/internal/jemalloc_internal_defs.h.in new file mode 100644 index 00000000000..8068861041f --- /dev/null +++ b/contrib/jemalloc-cmake/include_linux_ppc64le/jemalloc/internal/jemalloc_internal_defs.h.in @@ -0,0 +1,367 @@ +/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */ +#ifndef JEMALLOC_INTERNAL_DEFS_H_ +#define JEMALLOC_INTERNAL_DEFS_H_ +/* + * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all + * public APIs to be prefixed. 
This makes it possible, with some care, to use + * multiple allocators simultaneously. + */ +/* #undef JEMALLOC_PREFIX */ +/* #undef JEMALLOC_CPREFIX */ + +/* + * Define overrides for non-standard allocator-related functions if they are + * present on the system. + */ +#define JEMALLOC_OVERRIDE___LIBC_CALLOC +#define JEMALLOC_OVERRIDE___LIBC_FREE +#define JEMALLOC_OVERRIDE___LIBC_MALLOC +#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN +#define JEMALLOC_OVERRIDE___LIBC_REALLOC +#define JEMALLOC_OVERRIDE___LIBC_VALLOC +/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */ + +/* + * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. + * For shared libraries, symbol visibility mechanisms prevent these symbols + * from being exported, but for static libraries, naming collisions are a real + * possibility. + */ +#define JEMALLOC_PRIVATE_NAMESPACE je_ + +/* + * Hyper-threaded CPUs may need a special instruction inside spin loops in + * order to yield to another virtual CPU. + */ +#define CPU_SPINWAIT +/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */ +#define HAVE_CPU_SPINWAIT 0 + +/* + * Number of significant bits in virtual addresses. This may be less than the + * total number of bits in a pointer, e.g. on x64, for which the uppermost 16 + * bits are the same as bit 47. + */ +#define LG_VADDR 64 + +/* Defined if C11 atomics are available. */ +#define JEMALLOC_C11_ATOMICS 1 + +/* Defined if GCC __atomic atomics are available. */ +#define JEMALLOC_GCC_ATOMIC_ATOMICS 1 +/* and the 8-bit variant support. */ +#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1 + +/* Defined if GCC __sync atomics are available. */ +#define JEMALLOC_GCC_SYNC_ATOMICS 1 +/* and the 8-bit variant support. */ +#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1 + +/* + * Defined if __builtin_clz() and __builtin_clzl() are available. + */ +#define JEMALLOC_HAVE_BUILTIN_CLZ + +/* + * Defined if os_unfair_lock_*() functions are available, as provided by Darwin. + */ +/* #undef JEMALLOC_OS_UNFAIR_LOCK */ + +/* Defined if syscall(2) is usable. */ +#define JEMALLOC_USE_SYSCALL + +/* + * Defined if secure_getenv(3) is available. + */ +// #define JEMALLOC_HAVE_SECURE_GETENV + +/* + * Defined if issetugid(2) is available. + */ +/* #undef JEMALLOC_HAVE_ISSETUGID */ + +/* Defined if pthread_atfork(3) is available. */ +#define JEMALLOC_HAVE_PTHREAD_ATFORK + +/* Defined if pthread_setname_np(3) is available. */ +#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. + */ +#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1 + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. + */ +#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1 + +/* + * Defined if mach_absolute_time() is available. + */ +/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */ + +/* + * Defined if _malloc_thread_cleanup() exists. At least in the case of + * FreeBSD, pthread_key_create() allocates, which if used during malloc + * bootstrapping will cause recursion into the pthreads library. Therefore, if + * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in + * malloc_tsd. + */ +/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */ + +/* + * Defined if threaded initialization is known to be safe on this platform. + * Among other things, it must be possible to initialize a mutex without + * triggering allocation in order for threaded allocation to be safe. 
+ */ +#define JEMALLOC_THREADED_INIT + +/* + * Defined if the pthreads implementation defines + * _pthread_mutex_init_calloc_cb(), in which case the function is used in order + * to avoid recursive allocation during mutex initialization. + */ +/* #undef JEMALLOC_MUTEX_INIT_CB */ + +/* Non-empty if the tls_model attribute is supported. */ +#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) + +/* + * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables + * inline functions. + */ +/* #undef JEMALLOC_DEBUG */ + +/* JEMALLOC_STATS enables statistics calculation. */ +#define JEMALLOC_STATS + +/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */ +/* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */ + +/* JEMALLOC_PROF enables allocation profiling. */ +/* #undef JEMALLOC_PROF */ + +/* Use libunwind for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_LIBUNWIND */ + +/* Use libgcc for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_LIBGCC */ + +/* Use gcc intrinsics for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_GCC */ + +/* + * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage + * segment (DSS). + */ +#define JEMALLOC_DSS + +/* Support memory filling (junk/zero). */ +#define JEMALLOC_FILL + +/* Support utrace(2)-based tracing. */ +/* #undef JEMALLOC_UTRACE */ + +/* Support optional abort() on OOM. */ +/* #undef JEMALLOC_XMALLOC */ + +/* Support lazy locking (avoid locking unless a second thread is launched). */ +/* #undef JEMALLOC_LAZY_LOCK */ + +/* + * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size + * classes). + */ +/* #undef LG_QUANTUM */ + +/* One page is 2^LG_PAGE bytes. */ +#define LG_PAGE 16 + +/* + * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the + * system does not explicitly support huge pages; system calls that require + * explicit huge page support are separately configured. + */ +#define LG_HUGEPAGE 21 + +/* + * If defined, adjacent virtual memory mappings with identical attributes + * automatically coalesce, and they fragment when changes are made to subranges. + * This is the normal order of things for mmap()/munmap(), but on Windows + * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e. + * mappings do *not* coalesce/fragment. + */ +#define JEMALLOC_MAPS_COALESCE + +/* + * If defined, retain memory for later reuse by default rather than using e.g. + * munmap() to unmap freed extents. This is enabled on 64-bit Linux because + * common sequences of mmap()/munmap() calls will cause virtual memory map + * holes. + */ +#define JEMALLOC_RETAIN + +/* TLS is used to map arenas and magazine caches to threads. */ +#define JEMALLOC_TLS + +/* + * Used to mark unreachable code to quiet "end of non-void" compiler warnings. + * Don't use this directly; instead use unreachable() from util.h + */ +#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable + +/* + * ffs*() functions to use for bitmapping. Don't use these directly; instead, + * use ffs_*() from util.h. + */ +#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll +#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl +#define JEMALLOC_INTERNAL_FFS __builtin_ffs + +/* + * popcount*() functions to use for bitmapping. 
+ */ +#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl +#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount + +/* + * If defined, explicitly attempt to more uniformly distribute large allocation + * pointer alignments across all cache indices. + */ +#define JEMALLOC_CACHE_OBLIVIOUS + +/* + * If defined, enable logging facilities. We make this a configure option to + * avoid taking extra branches everywhere. + */ +/* #undef JEMALLOC_LOG */ + +/* + * If defined, use readlinkat() (instead of readlink()) to follow + * /etc/malloc_conf. + */ +/* #undef JEMALLOC_READLINKAT */ + +/* + * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. + */ +/* #undef JEMALLOC_ZONE */ + +/* + * Methods for determining whether the OS overcommits. + * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's + * /proc/sys/vm.overcommit_memory file. + * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl. + */ +/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */ +#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY + +/* Defined if madvise(2) is available. */ +#define JEMALLOC_HAVE_MADVISE + +/* + * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE + * arguments to madvise(2). + */ +#define JEMALLOC_HAVE_MADVISE_HUGE + +/* + * Methods for purging unused pages differ between operating systems. + * + * madvise(..., MADV_FREE) : This marks pages as being unused, such that they + * will be discarded rather than swapped out. + * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is + * defined, this immediately discards pages, + * such that new pages will be demand-zeroed if + * the address region is later touched; + * otherwise this behaves similarly to + * MADV_FREE, though typically with higher + * system overhead. + */ +#define JEMALLOC_PURGE_MADVISE_FREE +#define JEMALLOC_PURGE_MADVISE_DONTNEED +#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS + +/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */ +/* #undef JEMALLOC_DEFINE_MADVISE_FREE */ + +/* + * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise. + */ +#define JEMALLOC_MADVISE_DONTDUMP + +/* + * Defined if transparent huge pages (THPs) are supported via the + * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled. + */ +/* #undef JEMALLOC_THP */ + +/* Define if operating system has alloca.h header. */ +#define JEMALLOC_HAS_ALLOCA_H 1 + +/* C99 restrict keyword supported. */ +#define JEMALLOC_HAS_RESTRICT 1 + +/* For use by hash code. */ +/* #undef JEMALLOC_BIG_ENDIAN */ + +/* sizeof(int) == 2^LG_SIZEOF_INT. */ +#define LG_SIZEOF_INT 2 + +/* sizeof(long) == 2^LG_SIZEOF_LONG. */ +#define LG_SIZEOF_LONG 3 + +/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */ +#define LG_SIZEOF_LONG_LONG 3 + +/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ +#define LG_SIZEOF_INTMAX_T 3 + +/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */ +#define JEMALLOC_GLIBC_MALLOC_HOOK + +/* glibc memalign hook. */ +#define JEMALLOC_GLIBC_MEMALIGN_HOOK + +/* pthread support */ +#define JEMALLOC_HAVE_PTHREAD + +/* dlsym() support */ +#define JEMALLOC_HAVE_DLSYM + +/* Adaptive mutex support in pthreads. */ +#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP + +/* GNU specific sched_getcpu support */ +#define JEMALLOC_HAVE_SCHED_GETCPU + +/* GNU specific sched_setaffinity support */ +#define JEMALLOC_HAVE_SCHED_SETAFFINITY + +/* + * If defined, all the features necessary for background threads are present. 
+ */ +#define JEMALLOC_BACKGROUND_THREAD 1 + +/* + * If defined, jemalloc symbols are not exported (doesn't work when + * JEMALLOC_PREFIX is not defined). + */ +/* #undef JEMALLOC_EXPORT */ + +/* config.malloc_conf options string. */ +#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@" + +/* If defined, jemalloc takes the malloc/free/etc. symbol names. */ +#define JEMALLOC_IS_MALLOC 1 + +/* + * Defined if strerror_r returns char * if _GNU_SOURCE is defined. + */ +#define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE + +/* Performs additional safety checks when defined. */ +/* #undef JEMALLOC_OPT_SAFETY_CHECKS */ + +#endif /* JEMALLOC_INTERNAL_DEFS_H_ */ diff --git a/contrib/krb5-cmake/CMakeLists.txt b/contrib/krb5-cmake/CMakeLists.txt index fce7fbc582a..7c750ca12b6 100644 --- a/contrib/krb5-cmake/CMakeLists.txt +++ b/contrib/krb5-cmake/CMakeLists.txt @@ -3,465 +3,465 @@ if(NOT AWK_PROGRAM) message(FATAL_ERROR "You need the awk program to build ClickHouse with krb5 enabled.") endif() -set(KRB5_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/krb5/src) +set(KRB5_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/krb5/src") set(ALL_SRCS - ${KRB5_SOURCE_DIR}/util/et/et_name.c - ${KRB5_SOURCE_DIR}/util/et/com_err.c - ${KRB5_SOURCE_DIR}/util/et/error_message.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_names.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_rel_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_unwrap_aead.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_set_name_attr.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_glue.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_imp_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/gssd_pname_to_uid.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_authorize_localname.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_prf.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_acquire_cred_with_pw.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_set_cred_option.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_map_name_to_any.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_rel_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_seal.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_delete_sec_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_context_time.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_get_name_attr.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_mech_invoke.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_unwrap_iov.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_exp_sec_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_init_sec_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_accept_sec_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_verify.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_sign.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_mechname.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_mechattr.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_complete_auth_token.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_wrap_aead.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_cred_oid.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_rel_buffer.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_initialize.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_export_name_comp.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_set_context_option.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_acquire_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_acquire_cred_imp_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_imp_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_set_neg_mechs.c - 
${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_export_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_oid_ops.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_context_oid.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_del_name_attr.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_decapsulate_token.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_compare_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_rel_name_mapping.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_imp_sec_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_dup_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_export_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_wrap_iov.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_rel_oid_set.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_unseal.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_store_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_buffer_set.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_canon_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_dsp_status.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_dsp_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_dsp_name_ext.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_saslname.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_process_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_encapsulate_token.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_negoex.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/delete_sec_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/lucid_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/duplicate_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/get_tkt_flags.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/set_allowable_enctypes.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5sealiov.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/gssapi_err_krb5.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/canon_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/inq_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/export_sec_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/inq_names.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/prf.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5sealv3iov.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/store_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/import_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/export_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/naming_exts.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/s4u_gss_glue.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/rel_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5unsealiov.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/gssapi_krb5.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/disp_status.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/import_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5seal.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/accept_sec_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/import_sec_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/process_context_token.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/disp_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/wrap_size_limit.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/krb5_gss_glue.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/util_crypt.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/set_ccache.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/export_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/rel_oid.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/val_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/context_time.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/cred_store.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/iakerb.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/copy_ccache.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/init_sec_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/indicate_mechs.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/inq_context.c - 
${KRB5_SOURCE_DIR}/lib/gssapi/krb5/util_seed.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/util_seqnum.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/compare_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/ser_sctx.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5sealv3.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/acquire_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5unseal.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/rel_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/util_cksum.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/disp_com_err_status.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/gssapi_generic.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/rel_oid_set.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/oid_ops.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_buffer.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_buffer_set.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_set.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_token.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/gssapi_err_generic.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/disp_major_status.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_seqstate.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_errmap.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/rel_buffer.c + "${KRB5_SOURCE_DIR}/util/et/et_name.c" + "${KRB5_SOURCE_DIR}/util/et/com_err.c" + "${KRB5_SOURCE_DIR}/util/et/error_message.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_names.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_rel_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_unwrap_aead.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_set_name_attr.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_glue.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_imp_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/gssd_pname_to_uid.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_authorize_localname.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_prf.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_acquire_cred_with_pw.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_set_cred_option.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_map_name_to_any.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_rel_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_seal.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_delete_sec_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_context_time.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_get_name_attr.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_mech_invoke.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_unwrap_iov.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_exp_sec_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_init_sec_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_accept_sec_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_verify.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_sign.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_mechname.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_mechattr.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_complete_auth_token.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_wrap_aead.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_cred_oid.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_rel_buffer.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_initialize.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_export_name_comp.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_set_context_option.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_acquire_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_acquire_cred_imp_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_imp_name.c" + 
"${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_set_neg_mechs.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_export_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_oid_ops.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_context_oid.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_del_name_attr.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_decapsulate_token.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_compare_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_rel_name_mapping.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_imp_sec_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_dup_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_export_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_wrap_iov.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_rel_oid_set.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_unseal.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_store_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_buffer_set.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_canon_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_dsp_status.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_dsp_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_dsp_name_ext.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_saslname.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_process_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_encapsulate_token.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_negoex.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/delete_sec_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/lucid_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/duplicate_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/get_tkt_flags.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/set_allowable_enctypes.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5sealiov.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/gssapi_err_krb5.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/canon_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/inq_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/export_sec_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/inq_names.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/prf.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5sealv3iov.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/store_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/import_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/export_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/naming_exts.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/s4u_gss_glue.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/rel_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5unsealiov.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/gssapi_krb5.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/disp_status.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/import_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5seal.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/accept_sec_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/import_sec_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/process_context_token.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/disp_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/wrap_size_limit.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/krb5_gss_glue.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/util_crypt.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/set_ccache.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/export_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/rel_oid.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/val_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/context_time.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/cred_store.c" + 
"${KRB5_SOURCE_DIR}/lib/gssapi/krb5/iakerb.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/copy_ccache.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/init_sec_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/indicate_mechs.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/inq_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/util_seed.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/util_seqnum.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/compare_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/ser_sctx.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5sealv3.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/acquire_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5unseal.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/rel_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/util_cksum.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/disp_com_err_status.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/gssapi_generic.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/rel_oid_set.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/oid_ops.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_buffer.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_buffer_set.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_set.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_token.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/gssapi_err_generic.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/disp_major_status.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_seqstate.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_errmap.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/rel_buffer.c" - ${KRB5_SOURCE_DIR}/lib/gssapi/spnego/spnego_mech.c - ${KRB5_SOURCE_DIR}/lib/gssapi/spnego/negoex_util.c - ${KRB5_SOURCE_DIR}/lib/gssapi/spnego/negoex_ctx.c + "${KRB5_SOURCE_DIR}/lib/gssapi/spnego/spnego_mech.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/spnego/negoex_util.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/spnego/negoex_ctx.c" - # ${KRB5_SOURCE_DIR}/lib/gssapi/spnego/negoex_trace.c + # "${KRB5_SOURCE_DIR}/lib/gssapi/spnego/negoex_trace.c" - ${KRB5_SOURCE_DIR}/lib/crypto/krb/prng.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_dk_cmac.c - # ${KRB5_SOURCE_DIR}/lib/crypto/krb/crc32.c - # ${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_cbc.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/enctype_util.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_etm.c - # ${KRB5_SOURCE_DIR}/lib/crypto/krb/combine_keys.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/default_state.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/decrypt_iov.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_dk_cmac.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/etypes.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/old_api_glue.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/cksumtypes.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/prf_cmac.c - # ${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_old.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/decrypt.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/prf_dk.c - # ${KRB5_SOURCE_DIR}/lib/crypto/krb/s2k_des.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_unkeyed.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/crypto_length.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/block_size.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/string_to_key.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/verify_checksum.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/crypto_libinit.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/derive.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/random_to_key.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/verify_checksum_iov.c - # ${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_confounder.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_length.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_dk_hmac.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/make_checksum.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/prf_des.c - 
${KRB5_SOURCE_DIR}/lib/crypto/krb/prf.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/coll_proof_cksum.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_rc4.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/cf2.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/aead.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/encrypt_iov.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/cksumtype_to_string.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/key.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_raw.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/keylengths.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_hmac_md5.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/keyed_cksum.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/keyed_checksum_types.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/prf_aes2.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/state.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_dk_hmac.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/encrypt.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_etm.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/make_random_key.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/string_to_cksumtype.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/mandatory_sumtype.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/make_checksum_iov.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/s2k_rc4.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/valid_cksumtype.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/nfold.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/prng_fortuna.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/encrypt_length.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/cmac.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/keyblocks.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/prf_rc4.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/s2k_pbkdf2.c - ${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/aes.c - # ${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/des.c - ${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/rc4.c - ${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/des3.c - #${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/camellia.c - ${KRB5_SOURCE_DIR}/lib/crypto/openssl/sha256.c - ${KRB5_SOURCE_DIR}/lib/crypto/openssl/hmac.c - ${KRB5_SOURCE_DIR}/lib/crypto/openssl/pbkdf2.c - ${KRB5_SOURCE_DIR}/lib/crypto/openssl/init.c - ${KRB5_SOURCE_DIR}/lib/crypto/openssl/stubs.c - # ${KRB5_SOURCE_DIR}/lib/crypto/openssl/hash_provider/hash_crc32.c - ${KRB5_SOURCE_DIR}/lib/crypto/openssl/hash_provider/hash_evp.c - ${KRB5_SOURCE_DIR}/lib/crypto/openssl/des/des_keys.c - ${KRB5_SOURCE_DIR}/util/support/fake-addrinfo.c - ${KRB5_SOURCE_DIR}/util/support/k5buf.c - ${KRB5_SOURCE_DIR}/util/support/hex.c - ${KRB5_SOURCE_DIR}/util/support/threads.c - ${KRB5_SOURCE_DIR}/util/support/utf8.c - ${KRB5_SOURCE_DIR}/util/support/hashtab.c - ${KRB5_SOURCE_DIR}/util/support/dir_filenames.c - ${KRB5_SOURCE_DIR}/util/support/base64.c - ${KRB5_SOURCE_DIR}/util/support/strerror_r.c - ${KRB5_SOURCE_DIR}/util/support/plugins.c - ${KRB5_SOURCE_DIR}/util/support/path.c - ${KRB5_SOURCE_DIR}/util/support/init-addrinfo.c - ${KRB5_SOURCE_DIR}/util/support/json.c - ${KRB5_SOURCE_DIR}/util/support/errors.c - ${KRB5_SOURCE_DIR}/util/support/utf8_conv.c - ${KRB5_SOURCE_DIR}/util/support/strlcpy.c - ${KRB5_SOURCE_DIR}/util/support/gmt_mktime.c - ${KRB5_SOURCE_DIR}/util/support/zap.c - ${KRB5_SOURCE_DIR}/util/support/bcmp.c - ${KRB5_SOURCE_DIR}/util/support/secure_getenv.c - ${KRB5_SOURCE_DIR}/util/profile/prof_tree.c - ${KRB5_SOURCE_DIR}/util/profile/prof_file.c - ${KRB5_SOURCE_DIR}/util/profile/prof_parse.c - ${KRB5_SOURCE_DIR}/util/profile/prof_get.c - ${KRB5_SOURCE_DIR}/util/profile/prof_set.c - ${KRB5_SOURCE_DIR}/util/profile/prof_err.c - ${KRB5_SOURCE_DIR}/util/profile/prof_init.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/fwd_tgt.c - 
${KRB5_SOURCE_DIR}/lib/krb5/krb/conv_creds.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/fast.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_adata.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_tick.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/enc_keyhelper.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_actx.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/init_ctx.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth2.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_princ.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/parse_host_string.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/pr_to_salt.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_req.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/pac_sign.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_addrs.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/conv_princ.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_rep.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/str_conv.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/gic_opt.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/recvauth.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_cksum.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/ai_authdata.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_ctx.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/appdefault.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/bld_princ.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/in_tkt_sky.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_creds.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/auth_con.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_key.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/kdc_rep_dc.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_cred.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/gic_keytab.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_req_dec.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/set_realm.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth_sam2.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/libdef_parse.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/privsafe.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_auth.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/val_renew.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/addr_order.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/authdata_dec.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/walk_rtree.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/gen_subkey.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_auth.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/chpw.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_req.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/allow_weak.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_rep.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_priv.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/s4u_authdata.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth_otp.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/init_keyblock.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_addr.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/encrypt_tk.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/s4u_creds.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/srv_dec_tkt.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_priv.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/authdata_enc.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/authdata_exp.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/decode_kdc.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/decrypt_tk.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/enc_helper.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_req_ext.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_key.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth_encts.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/send_tgs.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_cksum.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/tgtname.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/encode_kdc.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_cred.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_safe.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth_pkinit.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/srv_rcache.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/chk_trans.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/etype_list.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/get_creds.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_princ.c - 
${KRB5_SOURCE_DIR}/lib/krb5/krb/gic_pwd.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/authdata.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/gen_save_subkey.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/vfy_increds.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/addr_comp.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/kfree.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/response_items.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/serialize.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/cammac_util.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/gc_via_tkt.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_ctx.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/sendauth.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/addr_srch.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_safe.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth_ec.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/bld_pr_ext.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/random_str.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/sname_match.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/princ_comp.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/get_in_tkt.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/gen_seqnum.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/cp_key_cnt.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_error.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_athctr.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/deltat.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/get_etype_info.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/plugin.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/kerrs.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/vic_opt.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/unparse.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/parse.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_error.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/pac.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/valid_times.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_data.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/padata.c + "${KRB5_SOURCE_DIR}/lib/crypto/krb/prng.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_dk_cmac.c" + # "${KRB5_SOURCE_DIR}/lib/crypto/krb/crc32.c" + # "${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_cbc.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/enctype_util.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_etm.c" + # "${KRB5_SOURCE_DIR}/lib/crypto/krb/combine_keys.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/default_state.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/decrypt_iov.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_dk_cmac.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/etypes.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/old_api_glue.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/cksumtypes.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/prf_cmac.c" + # "${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_old.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/decrypt.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/prf_dk.c" + # "${KRB5_SOURCE_DIR}/lib/crypto/krb/s2k_des.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_unkeyed.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/crypto_length.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/block_size.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/string_to_key.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/verify_checksum.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/crypto_libinit.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/derive.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/random_to_key.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/verify_checksum_iov.c" + # "${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_confounder.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_length.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_dk_hmac.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/make_checksum.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/prf_des.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/prf.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/coll_proof_cksum.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_rc4.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/cf2.c" + 
"${KRB5_SOURCE_DIR}/lib/crypto/krb/aead.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/encrypt_iov.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/cksumtype_to_string.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/key.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_raw.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/keylengths.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_hmac_md5.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/keyed_cksum.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/keyed_checksum_types.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/prf_aes2.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/state.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_dk_hmac.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/encrypt.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_etm.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/make_random_key.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/string_to_cksumtype.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/mandatory_sumtype.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/make_checksum_iov.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/s2k_rc4.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/valid_cksumtype.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/nfold.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/prng_fortuna.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/encrypt_length.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/cmac.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/keyblocks.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/prf_rc4.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/s2k_pbkdf2.c" + "${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/aes.c" + # "${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/des.c" + "${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/rc4.c" + "${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/des3.c" + #"${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/camellia.c" + "${KRB5_SOURCE_DIR}/lib/crypto/openssl/sha256.c" + "${KRB5_SOURCE_DIR}/lib/crypto/openssl/hmac.c" + "${KRB5_SOURCE_DIR}/lib/crypto/openssl/pbkdf2.c" + "${KRB5_SOURCE_DIR}/lib/crypto/openssl/init.c" + "${KRB5_SOURCE_DIR}/lib/crypto/openssl/stubs.c" + # "${KRB5_SOURCE_DIR}/lib/crypto/openssl/hash_provider/hash_crc32.c" + "${KRB5_SOURCE_DIR}/lib/crypto/openssl/hash_provider/hash_evp.c" + "${KRB5_SOURCE_DIR}/lib/crypto/openssl/des/des_keys.c" + "${KRB5_SOURCE_DIR}/util/support/fake-addrinfo.c" + "${KRB5_SOURCE_DIR}/util/support/k5buf.c" + "${KRB5_SOURCE_DIR}/util/support/hex.c" + "${KRB5_SOURCE_DIR}/util/support/threads.c" + "${KRB5_SOURCE_DIR}/util/support/utf8.c" + "${KRB5_SOURCE_DIR}/util/support/hashtab.c" + "${KRB5_SOURCE_DIR}/util/support/dir_filenames.c" + "${KRB5_SOURCE_DIR}/util/support/base64.c" + "${KRB5_SOURCE_DIR}/util/support/strerror_r.c" + "${KRB5_SOURCE_DIR}/util/support/plugins.c" + "${KRB5_SOURCE_DIR}/util/support/path.c" + "${KRB5_SOURCE_DIR}/util/support/init-addrinfo.c" + "${KRB5_SOURCE_DIR}/util/support/json.c" + "${KRB5_SOURCE_DIR}/util/support/errors.c" + "${KRB5_SOURCE_DIR}/util/support/utf8_conv.c" + "${KRB5_SOURCE_DIR}/util/support/strlcpy.c" + "${KRB5_SOURCE_DIR}/util/support/gmt_mktime.c" + "${KRB5_SOURCE_DIR}/util/support/zap.c" + "${KRB5_SOURCE_DIR}/util/support/bcmp.c" + "${KRB5_SOURCE_DIR}/util/support/secure_getenv.c" + "${KRB5_SOURCE_DIR}/util/profile/prof_tree.c" + "${KRB5_SOURCE_DIR}/util/profile/prof_file.c" + "${KRB5_SOURCE_DIR}/util/profile/prof_parse.c" + "${KRB5_SOURCE_DIR}/util/profile/prof_get.c" + "${KRB5_SOURCE_DIR}/util/profile/prof_set.c" + "${KRB5_SOURCE_DIR}/util/profile/prof_err.c" + "${KRB5_SOURCE_DIR}/util/profile/prof_init.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/fwd_tgt.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/conv_creds.c" + 
"${KRB5_SOURCE_DIR}/lib/krb5/krb/fast.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_adata.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_tick.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/enc_keyhelper.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_actx.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/init_ctx.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth2.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_princ.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/parse_host_string.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/pr_to_salt.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_req.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/pac_sign.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_addrs.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/conv_princ.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_rep.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/str_conv.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/gic_opt.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/recvauth.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_cksum.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/ai_authdata.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_ctx.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/appdefault.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/bld_princ.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/in_tkt_sky.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_creds.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/auth_con.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_key.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/kdc_rep_dc.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_cred.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/gic_keytab.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_req_dec.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/set_realm.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth_sam2.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/libdef_parse.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/privsafe.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_auth.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/val_renew.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/addr_order.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/authdata_dec.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/walk_rtree.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/gen_subkey.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_auth.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/chpw.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_req.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/allow_weak.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_rep.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_priv.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/s4u_authdata.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth_otp.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/init_keyblock.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_addr.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/encrypt_tk.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/s4u_creds.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/srv_dec_tkt.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_priv.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/authdata_enc.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/authdata_exp.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/decode_kdc.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/decrypt_tk.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/enc_helper.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_req_ext.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_key.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth_encts.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/send_tgs.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_cksum.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/tgtname.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/encode_kdc.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_cred.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_safe.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth_pkinit.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/srv_rcache.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/chk_trans.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/etype_list.c" + 
"${KRB5_SOURCE_DIR}/lib/krb5/krb/get_creds.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_princ.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/gic_pwd.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/authdata.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/gen_save_subkey.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/vfy_increds.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/addr_comp.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/kfree.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/response_items.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/serialize.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/cammac_util.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/gc_via_tkt.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_ctx.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/sendauth.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/addr_srch.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_safe.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth_ec.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/bld_pr_ext.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/random_str.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/sname_match.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/princ_comp.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/get_in_tkt.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/gen_seqnum.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/cp_key_cnt.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_error.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_athctr.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/deltat.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/get_etype_info.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/plugin.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/kerrs.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/vic_opt.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/unparse.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/parse.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_error.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/pac.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/valid_times.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_data.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/padata.c" - ${KRB5_SOURCE_DIR}/lib/krb5/os/hostrealm.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/thread_safe.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/krbfileio.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/toffset.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/hostaddr.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/ustime.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/timeofday.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/ccdefname.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/full_ipadr.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/read_pwd.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/trace.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/localauth_k5login.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/localauth_rule.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/localaddr.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/hostrealm_dns.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/hostrealm_domain.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/sn2princ.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/net_write.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/gen_rname.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/net_read.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/accessor.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/hostrealm_profile.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/c_ustime.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/expand_path.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/port2ip.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/changepw.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/unlck_file.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/gen_port.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/localauth_an2ln.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/genaddrs.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/init_os_ctx.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/localauth.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/locate_kdc.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/prompter.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/ktdefname.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/realm_dom.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/dnssrv.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/mk_faddr.c - # 
${KRB5_SOURCE_DIR}/lib/krb5/os/dnsglue.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/sendto_kdc.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/hostrealm_registry.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/write_msg.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/localauth_names.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/read_msg.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/lock_file.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccselect.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccselect_realm.c - # ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ser_cc.c + "${KRB5_SOURCE_DIR}/lib/krb5/os/hostrealm.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/thread_safe.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/krbfileio.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/toffset.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/hostaddr.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/ustime.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/timeofday.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/ccdefname.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/full_ipadr.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/read_pwd.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/trace.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/localauth_k5login.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/localauth_rule.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/localaddr.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/hostrealm_dns.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/hostrealm_domain.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/sn2princ.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/net_write.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/gen_rname.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/net_read.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/accessor.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/hostrealm_profile.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/c_ustime.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/expand_path.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/port2ip.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/changepw.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/unlck_file.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/gen_port.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/localauth_an2ln.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/genaddrs.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/init_os_ctx.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/localauth.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/locate_kdc.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/prompter.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/ktdefname.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/realm_dom.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/dnssrv.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/mk_faddr.c" + # "${KRB5_SOURCE_DIR}/lib/krb5/os/dnsglue.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/sendto_kdc.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/hostrealm_registry.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/write_msg.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/localauth_names.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/read_msg.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/lock_file.c" + "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccselect.c" + "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccselect_realm.c" + # "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ser_cc.c" - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccdefops.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_retr.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccselect_k5identity.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/cccopy.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccfns.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_file.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccbase.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/cccursor.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccdefault.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_memory.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccmarshal.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccselect_hostname.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_dir.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_keyring.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_kcm.c - 
- ${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktadd.c
- ${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktbase.c
- ${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktdefault.c
- ${KRB5_SOURCE_DIR}/lib/krb5/keytab/kt_memory.c
- ${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktfns.c
- ${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktremove.c
- ${KRB5_SOURCE_DIR}/lib/krb5/keytab/read_servi.c
- ${KRB5_SOURCE_DIR}/lib/krb5/keytab/kt_file.c
- ${KRB5_SOURCE_DIR}/lib/krb5/keytab/read_servi.c
- ${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktfr_entry.c
+ "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccdefops.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_retr.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccselect_k5identity.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/ccache/cccopy.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccfns.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_file.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccbase.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/ccache/cccursor.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccdefault.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_memory.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccmarshal.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccselect_hostname.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_dir.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_keyring.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_kcm.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktadd.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktbase.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktdefault.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/keytab/kt_memory.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktfns.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktremove.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/keytab/read_servi.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/keytab/kt_file.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/keytab/read_servi.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktfr_entry.c"
- ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.c
- ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.c
- ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.c
- ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.c
- ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.c
- ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.c
+ "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.c"
- ${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_base.c
- ${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_dfl.c
- ${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_file2.c
- ${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_none.c
- ${KRB5_SOURCE_DIR}/lib/krb5/rcache/memrcache.c
- ${KRB5_SOURCE_DIR}/lib/krb5/unicode/ucdata/ucdata.c
- ${KRB5_SOURCE_DIR}/lib/krb5/unicode/ucstr.c
- ${KRB5_SOURCE_DIR}/lib/krb5/asn.1/asn1_encode.c
- ${KRB5_SOURCE_DIR}/lib/krb5/asn.1/asn1_k_encode.c
- ${KRB5_SOURCE_DIR}/lib/krb5/asn.1/ldap_key_seq.c
- ${KRB5_SOURCE_DIR}/lib/krb5/krb5_libinit.c
+ "${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_base.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_dfl.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_file2.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_none.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/rcache/memrcache.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/unicode/ucdata/ucdata.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/unicode/ucstr.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/asn.1/asn1_encode.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/asn.1/asn1_k_encode.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/asn.1/ldap_key_seq.c"
+ "${KRB5_SOURCE_DIR}/lib/krb5/krb5_libinit.c"
)

add_custom_command(
- OUTPUT ${KRB5_SOURCE_DIR}/util/et/compile_et
+ OUTPUT "${KRB5_SOURCE_DIR}/util/et/compile_et"
COMMAND /bin/sh
./config_script
./compile_et.sh
@@ -470,7 +470,7 @@ add_custom_command(
sed
>
compile_et
- DEPENDS ${KRB5_SOURCE_DIR}/util/et/compile_et.sh ${KRB5_SOURCE_DIR}/util/et/config_script
+ DEPENDS "${KRB5_SOURCE_DIR}/util/et/compile_et.sh" "${KRB5_SOURCE_DIR}/util/et/config_script"
WORKING_DIRECTORY "${KRB5_SOURCE_DIR}/util/et"
)

@@ -497,8 +497,8 @@ function(preprocess_et out_var)
get_filename_component(ET_PATH ${in_f} DIRECTORY)

add_custom_command(OUTPUT ${F_C} ${F_H}
- COMMAND perl ${KRB5_SOURCE_DIR}/util/et/compile_et -d "${KRB5_SOURCE_DIR}/util/et" ${in_f}
- DEPENDS ${in_f} ${KRB5_SOURCE_DIR}/util/et/compile_et
+ COMMAND perl "${KRB5_SOURCE_DIR}/util/et/compile_et" -d "${KRB5_SOURCE_DIR}/util/et" ${in_f}
+ DEPENDS ${in_f} "${KRB5_SOURCE_DIR}/util/et/compile_et"
WORKING_DIRECTORY ${ET_PATH}
COMMENT "Creating preprocessed file ${F_C}"
VERBATIM
@@ -509,7 +509,7 @@ function(preprocess_et out_var)
endfunction()

add_custom_command(
- OUTPUT ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h
+ OUTPUT "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h"
COMMAND perl
-I../../../util
../../../util/gen-map.pl
@@ -525,27 +525,27 @@ add_custom_command(

add_custom_target(
ERROR_MAP_H
- DEPENDS ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h
+ DEPENDS "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h"
COMMENT "generating error_map.h"
VERBATIM
)

add_custom_command(
- OUTPUT ${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h
+ OUTPUT "${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h"
COMMAND perl -w -I../../../util ../../../util/gen.pl bimap errmap.h NAME=mecherrmap LEFT=OM_uint32 RIGHT=struct\ mecherror LEFTPRINT=print_OM_uint32 RIGHTPRINT=mecherror_print LEFTCMP=cmp_OM_uint32 RIGHTCMP=mecherror_cmp
WORKING_DIRECTORY "${KRB5_SOURCE_DIR}/lib/gssapi/generic"
)

add_custom_target(
ERRMAP_H
- DEPENDS ${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h
+ DEPENDS "${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h"
COMMENT "generating errmap.h"
VERBATIM
)

add_custom_target(
KRB_5_H
- DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/include/krb5/krb5.h
+ DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/include/krb5/krb5.h"
COMMENT "generating krb5.h"
VERBATIM
)

@@ -563,12 +563,12 @@ preprocess_et(processed_et_files ${ET_FILES})

if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
add_custom_command(
- OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/include_private/kcmrpc.h ${CMAKE_CURRENT_BINARY_DIR}/include_private/kcmrpc.c
- COMMAND mig -header kcmrpc.h -user kcmrpc.c -sheader /dev/null -server /dev/null -I${KRB5_SOURCE_DIR}/lib/krb5/ccache ${KRB5_SOURCE_DIR}/lib/krb5/ccache/kcmrpc.defs
+ OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/include_private/kcmrpc.h" "${CMAKE_CURRENT_BINARY_DIR}/include_private/kcmrpc.c"
+ COMMAND mig -header kcmrpc.h -user kcmrpc.c -sheader /dev/null -server /dev/null -I"${KRB5_SOURCE_DIR}/lib/krb5/ccache" "${KRB5_SOURCE_DIR}/lib/krb5/ccache/kcmrpc.defs"
WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/include_private"
)

- list(APPEND ALL_SRCS ${CMAKE_CURRENT_BINARY_DIR}/include_private/kcmrpc.c)
+ list(APPEND ALL_SRCS "${CMAKE_CURRENT_BINARY_DIR}/include_private/kcmrpc.c")
endif()

target_sources(${KRB5_LIBRARY} PRIVATE
@@ -576,98 +576,98 @@ target_sources(${KRB5_LIBRARY} PRIVATE
)

file(MAKE_DIRECTORY
- ${CMAKE_CURRENT_BINARY_DIR}/include/gssapi
+ "${CMAKE_CURRENT_BINARY_DIR}/include/gssapi"
)

file(GLOB GSSAPI_GENERIC_HEADERS
- ${KRB5_SOURCE_DIR}/lib/gssapi/generic/*.h
- ${KRB5_SOURCE_DIR}/lib/gssapi/generic/gssapi.hin
+ "${KRB5_SOURCE_DIR}/lib/gssapi/generic/*.h"
+ "${KRB5_SOURCE_DIR}/lib/gssapi/generic/gssapi.hin"
)

file(COPY ${GSSAPI_GENERIC_HEADERS}
- DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/include/gssapi/
+ DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/include/gssapi/"
)

file(RENAME
- ${CMAKE_CURRENT_BINARY_DIR}/include/gssapi/gssapi.hin
- ${CMAKE_CURRENT_BINARY_DIR}/include/gssapi/gssapi.h
+ "${CMAKE_CURRENT_BINARY_DIR}/include/gssapi/gssapi.hin"
+ "${CMAKE_CURRENT_BINARY_DIR}/include/gssapi/gssapi.h"
)

-file(COPY ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/gssapi_krb5.h
- DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/include/gssapi/
+file(COPY "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/gssapi_krb5.h"
+ DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/include/gssapi/"
)

-file(COPY ${KRB5_SOURCE_DIR}/util/et/com_err.h
- DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/include/
+file(COPY "${KRB5_SOURCE_DIR}/util/et/com_err.h"
+ DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/include/"
)

-file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/osconf.h
- DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/include_private/
+file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/osconf.h"
+ DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/include_private/"
)

-file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/profile.h
- DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/include_private/
+file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/profile.h"
+ DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/include_private/"
)

string(TOLOWER "${CMAKE_SYSTEM_NAME}" _system_name)

-file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/autoconf_${_system_name}.h
- DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/include_private/
+file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/autoconf_${_system_name}.h"
+ DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/include_private/"
)

file(RENAME
- ${CMAKE_CURRENT_BINARY_DIR}/include_private/autoconf_${_system_name}.h
- ${CMAKE_CURRENT_BINARY_DIR}/include_private/autoconf.h
+ "${CMAKE_CURRENT_BINARY_DIR}/include_private/autoconf_${_system_name}.h"
+ "${CMAKE_CURRENT_BINARY_DIR}/include_private/autoconf.h"
)

file(MAKE_DIRECTORY
- ${CMAKE_CURRENT_BINARY_DIR}/include/krb5
+ "${CMAKE_CURRENT_BINARY_DIR}/include/krb5"
)

SET(KRBHDEP
- ${KRB5_SOURCE_DIR}/include/krb5/krb5.hin
- ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.h
- ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.h
- ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.h
- ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.h
- ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.h
- ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.h
+ "${KRB5_SOURCE_DIR}/include/krb5/krb5.hin"
+ "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.h"
+ "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.h"
+ "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.h"
+ "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.h"
+ "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.h"
+ "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.h"
)

# cmake < 3.18 does not have 'cat' command
add_custom_command(
- OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/include/krb5/krb5.h
- COMMAND cat ${KRBHDEP} > ${CMAKE_CURRENT_BINARY_DIR}/include/krb5/krb5.h
+ OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/include/krb5/krb5.h"
+ COMMAND cat ${KRBHDEP} > "${CMAKE_CURRENT_BINARY_DIR}/include/krb5/krb5.h"
DEPENDS ${KRBHDEP}
)

target_include_directories(${KRB5_LIBRARY} PUBLIC
- ${KRB5_SOURCE_DIR}/include
- ${CMAKE_CURRENT_BINARY_DIR}/include
+ "${KRB5_SOURCE_DIR}/include"
+ "${CMAKE_CURRENT_BINARY_DIR}/include"
)

target_include_directories(${KRB5_LIBRARY} PRIVATE
- ${CMAKE_CURRENT_BINARY_DIR}/include_private # For autoconf.h and other generated headers.
+ "${CMAKE_CURRENT_BINARY_DIR}/include_private" # For autoconf.h and other generated headers.
${KRB5_SOURCE_DIR}
- ${KRB5_SOURCE_DIR}/include
- ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue
- ${KRB5_SOURCE_DIR}/lib/
- ${KRB5_SOURCE_DIR}/lib/gssapi
- ${KRB5_SOURCE_DIR}/lib/gssapi/generic
- ${KRB5_SOURCE_DIR}/lib/gssapi/krb5
- ${KRB5_SOURCE_DIR}/lib/gssapi/spnego
- ${KRB5_SOURCE_DIR}/util/et
- ${KRB5_SOURCE_DIR}/lib/crypto/openssl
- ${KRB5_SOURCE_DIR}/lib/crypto/krb
- ${KRB5_SOURCE_DIR}/util/profile
- ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccapi
- ${KRB5_SOURCE_DIR}/lib/krb5/ccache
- ${KRB5_SOURCE_DIR}/lib/krb5/keytab
- ${KRB5_SOURCE_DIR}/lib/krb5/rcache
- ${KRB5_SOURCE_DIR}/lib/krb5/unicode
- ${KRB5_SOURCE_DIR}/lib/krb5/os
+ "${KRB5_SOURCE_DIR}/include"
+ "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue"
+ "${KRB5_SOURCE_DIR}/lib/"
+ "${KRB5_SOURCE_DIR}/lib/gssapi"
+ "${KRB5_SOURCE_DIR}/lib/gssapi/generic"
+ "${KRB5_SOURCE_DIR}/lib/gssapi/krb5"
+ "${KRB5_SOURCE_DIR}/lib/gssapi/spnego"
+ "${KRB5_SOURCE_DIR}/util/et"
+ "${KRB5_SOURCE_DIR}/lib/crypto/openssl"
+ "${KRB5_SOURCE_DIR}/lib/crypto/krb"
+ "${KRB5_SOURCE_DIR}/util/profile"
+ "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccapi"
+ "${KRB5_SOURCE_DIR}/lib/krb5/ccache"
+ "${KRB5_SOURCE_DIR}/lib/krb5/keytab"
+ "${KRB5_SOURCE_DIR}/lib/krb5/rcache"
+ "${KRB5_SOURCE_DIR}/lib/krb5/unicode"
+ "${KRB5_SOURCE_DIR}/lib/krb5/os"
# ${OPENSSL_INCLUDE_DIR}
)

diff --git a/contrib/libcpuid-cmake/CMakeLists.txt b/contrib/libcpuid-cmake/CMakeLists.txt
index 8c1be50b4e6..9baebb3ba1b 100644
--- a/contrib/libcpuid-cmake/CMakeLists.txt
+++ b/contrib/libcpuid-cmake/CMakeLists.txt
@@ -1,11 +1,9 @@
-if (NOT ARCH_ARM)
+if(ARCH_AMD64)
option (ENABLE_CPUID "Enable libcpuid library (only internal)" ${ENABLE_LIBRARIES})
-endif()
-
-if (ARCH_ARM AND ENABLE_CPUID)
- message (${RECONFIGURE_MESSAGE_LEVEL} "cpuid is not supported on ARM")
+elseif(ENABLE_CPUID)
+ message (${RECONFIGURE_MESSAGE_LEVEL} "libcpuid is only supported on x86_64")
set (ENABLE_CPUID 0)
-endif ()
+endif()

if (NOT ENABLE_CPUID)
add_library (cpuid INTERFACE)

diff --git a/contrib/libcxx b/contrib/libcxx
index 8b80a151d12..2fa892f69ac 160000
--- a/contrib/libcxx
+++ b/contrib/libcxx
@@ -1 +1 @@
-Subproject commit 8b80a151d12b98ffe2d0c22f7cec12c3b9ff88d7
+Subproject commit 2fa892f69acbaa40f8a18c6484854a6183a34482

diff --git a/contrib/libcxx-cmake/CMakeLists.txt b/contrib/libcxx-cmake/CMakeLists.txt
index 3b5d53cd1c0..0cfb4191619 100644
--- a/contrib/libcxx-cmake/CMakeLists.txt
+++ b/contrib/libcxx-cmake/CMakeLists.txt
@@ -1,49 +1,49 @@
include(CheckCXXCompilerFlag)

-set(LIBCXX_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcxx)
+set(LIBCXX_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libcxx")

set(SRCS
-${LIBCXX_SOURCE_DIR}/src/algorithm.cpp
-${LIBCXX_SOURCE_DIR}/src/any.cpp
-${LIBCXX_SOURCE_DIR}/src/atomic.cpp
-${LIBCXX_SOURCE_DIR}/src/barrier.cpp
-${LIBCXX_SOURCE_DIR}/src/bind.cpp
-${LIBCXX_SOURCE_DIR}/src/charconv.cpp
-${LIBCXX_SOURCE_DIR}/src/chrono.cpp
-${LIBCXX_SOURCE_DIR}/src/condition_variable.cpp
-${LIBCXX_SOURCE_DIR}/src/condition_variable_destructor.cpp
-${LIBCXX_SOURCE_DIR}/src/debug.cpp
-${LIBCXX_SOURCE_DIR}/src/exception.cpp
-${LIBCXX_SOURCE_DIR}/src/experimental/memory_resource.cpp
-${LIBCXX_SOURCE_DIR}/src/filesystem/directory_iterator.cpp
-${LIBCXX_SOURCE_DIR}/src/filesystem/int128_builtins.cpp
-${LIBCXX_SOURCE_DIR}/src/filesystem/operations.cpp
-${LIBCXX_SOURCE_DIR}/src/functional.cpp
-${LIBCXX_SOURCE_DIR}/src/future.cpp
-${LIBCXX_SOURCE_DIR}/src/hash.cpp
-${LIBCXX_SOURCE_DIR}/src/ios.cpp
-${LIBCXX_SOURCE_DIR}/src/ios.instantiations.cpp
-${LIBCXX_SOURCE_DIR}/src/iostream.cpp
-${LIBCXX_SOURCE_DIR}/src/locale.cpp
-${LIBCXX_SOURCE_DIR}/src/memory.cpp
-${LIBCXX_SOURCE_DIR}/src/mutex.cpp
-${LIBCXX_SOURCE_DIR}/src/mutex_destructor.cpp
-${LIBCXX_SOURCE_DIR}/src/new.cpp
-${LIBCXX_SOURCE_DIR}/src/optional.cpp
-${LIBCXX_SOURCE_DIR}/src/random.cpp
-${LIBCXX_SOURCE_DIR}/src/random_shuffle.cpp
-${LIBCXX_SOURCE_DIR}/src/regex.cpp
-${LIBCXX_SOURCE_DIR}/src/shared_mutex.cpp
-${LIBCXX_SOURCE_DIR}/src/stdexcept.cpp
-${LIBCXX_SOURCE_DIR}/src/string.cpp
-${LIBCXX_SOURCE_DIR}/src/strstream.cpp
-${LIBCXX_SOURCE_DIR}/src/system_error.cpp
-${LIBCXX_SOURCE_DIR}/src/thread.cpp
-${LIBCXX_SOURCE_DIR}/src/typeinfo.cpp
-${LIBCXX_SOURCE_DIR}/src/utility.cpp
-${LIBCXX_SOURCE_DIR}/src/valarray.cpp
-${LIBCXX_SOURCE_DIR}/src/variant.cpp
-${LIBCXX_SOURCE_DIR}/src/vector.cpp
+"${LIBCXX_SOURCE_DIR}/src/algorithm.cpp"
+"${LIBCXX_SOURCE_DIR}/src/any.cpp"
+"${LIBCXX_SOURCE_DIR}/src/atomic.cpp"
+"${LIBCXX_SOURCE_DIR}/src/barrier.cpp"
+"${LIBCXX_SOURCE_DIR}/src/bind.cpp"
+"${LIBCXX_SOURCE_DIR}/src/charconv.cpp"
+"${LIBCXX_SOURCE_DIR}/src/chrono.cpp"
+"${LIBCXX_SOURCE_DIR}/src/condition_variable.cpp"
+"${LIBCXX_SOURCE_DIR}/src/condition_variable_destructor.cpp"
+"${LIBCXX_SOURCE_DIR}/src/debug.cpp"
+"${LIBCXX_SOURCE_DIR}/src/exception.cpp"
+"${LIBCXX_SOURCE_DIR}/src/experimental/memory_resource.cpp"
+"${LIBCXX_SOURCE_DIR}/src/filesystem/directory_iterator.cpp"
+"${LIBCXX_SOURCE_DIR}/src/filesystem/int128_builtins.cpp"
+"${LIBCXX_SOURCE_DIR}/src/filesystem/operations.cpp"
+"${LIBCXX_SOURCE_DIR}/src/functional.cpp"
+"${LIBCXX_SOURCE_DIR}/src/future.cpp"
+"${LIBCXX_SOURCE_DIR}/src/hash.cpp"
+"${LIBCXX_SOURCE_DIR}/src/ios.cpp"
+"${LIBCXX_SOURCE_DIR}/src/ios.instantiations.cpp"
+"${LIBCXX_SOURCE_DIR}/src/iostream.cpp"
+"${LIBCXX_SOURCE_DIR}/src/locale.cpp"
+"${LIBCXX_SOURCE_DIR}/src/memory.cpp"
+"${LIBCXX_SOURCE_DIR}/src/mutex.cpp"
+"${LIBCXX_SOURCE_DIR}/src/mutex_destructor.cpp"
+"${LIBCXX_SOURCE_DIR}/src/new.cpp"
+"${LIBCXX_SOURCE_DIR}/src/optional.cpp"
+"${LIBCXX_SOURCE_DIR}/src/random.cpp"
+"${LIBCXX_SOURCE_DIR}/src/random_shuffle.cpp"
+"${LIBCXX_SOURCE_DIR}/src/regex.cpp"
+"${LIBCXX_SOURCE_DIR}/src/shared_mutex.cpp"
+"${LIBCXX_SOURCE_DIR}/src/stdexcept.cpp"
+"${LIBCXX_SOURCE_DIR}/src/string.cpp"
+"${LIBCXX_SOURCE_DIR}/src/strstream.cpp"
+"${LIBCXX_SOURCE_DIR}/src/system_error.cpp"
+"${LIBCXX_SOURCE_DIR}/src/thread.cpp"
+"${LIBCXX_SOURCE_DIR}/src/typeinfo.cpp"
+"${LIBCXX_SOURCE_DIR}/src/utility.cpp"
+"${LIBCXX_SOURCE_DIR}/src/valarray.cpp"
+"${LIBCXX_SOURCE_DIR}/src/variant.cpp"
+"${LIBCXX_SOURCE_DIR}/src/vector.cpp"
)

add_library(cxx ${SRCS})

@@ -56,6 +56,11 @@ if (USE_UNWIND)
target_compile_definitions(cxx PUBLIC -DSTD_EXCEPTION_HAS_STACK_TRACE=1)
endif ()

+# Override the deduced attribute support that causes error.
+if (OS_DARWIN AND COMPILER_GCC)
+ add_compile_definitions(_LIBCPP_INIT_PRIORITY_MAX)
+endif ()
+
target_compile_options(cxx PUBLIC $<$<COMPILE_LANGUAGE:CXX>:-nostdinc++>)

# Third party library may have substandard code.
diff --git a/contrib/libcxxabi-cmake/CMakeLists.txt b/contrib/libcxxabi-cmake/CMakeLists.txt
index 9d8b94dabf0..0bb5d663633 100644
--- a/contrib/libcxxabi-cmake/CMakeLists.txt
+++ b/contrib/libcxxabi-cmake/CMakeLists.txt
@@ -1,24 +1,24 @@
-set(LIBCXXABI_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcxxabi)
+set(LIBCXXABI_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libcxxabi")

set(SRCS
-${LIBCXXABI_SOURCE_DIR}/src/stdlib_stdexcept.cpp
-${LIBCXXABI_SOURCE_DIR}/src/cxa_virtual.cpp
-${LIBCXXABI_SOURCE_DIR}/src/cxa_thread_atexit.cpp
-${LIBCXXABI_SOURCE_DIR}/src/fallback_malloc.cpp
-${LIBCXXABI_SOURCE_DIR}/src/cxa_guard.cpp
-${LIBCXXABI_SOURCE_DIR}/src/cxa_default_handlers.cpp
-${LIBCXXABI_SOURCE_DIR}/src/cxa_personality.cpp
-${LIBCXXABI_SOURCE_DIR}/src/stdlib_exception.cpp
-${LIBCXXABI_SOURCE_DIR}/src/abort_message.cpp
-${LIBCXXABI_SOURCE_DIR}/src/cxa_demangle.cpp
-${LIBCXXABI_SOURCE_DIR}/src/cxa_exception.cpp
-${LIBCXXABI_SOURCE_DIR}/src/cxa_handlers.cpp
-${LIBCXXABI_SOURCE_DIR}/src/cxa_exception_storage.cpp
-${LIBCXXABI_SOURCE_DIR}/src/private_typeinfo.cpp
-${LIBCXXABI_SOURCE_DIR}/src/stdlib_typeinfo.cpp
-${LIBCXXABI_SOURCE_DIR}/src/cxa_aux_runtime.cpp
-${LIBCXXABI_SOURCE_DIR}/src/cxa_vector.cpp
-${LIBCXXABI_SOURCE_DIR}/src/stdlib_new_delete.cpp
+"${LIBCXXABI_SOURCE_DIR}/src/stdlib_stdexcept.cpp"
+"${LIBCXXABI_SOURCE_DIR}/src/cxa_virtual.cpp"
+"${LIBCXXABI_SOURCE_DIR}/src/cxa_thread_atexit.cpp"
+"${LIBCXXABI_SOURCE_DIR}/src/fallback_malloc.cpp"
+"${LIBCXXABI_SOURCE_DIR}/src/cxa_guard.cpp"
+"${LIBCXXABI_SOURCE_DIR}/src/cxa_default_handlers.cpp"
+"${LIBCXXABI_SOURCE_DIR}/src/cxa_personality.cpp"
+"${LIBCXXABI_SOURCE_DIR}/src/stdlib_exception.cpp"
+"${LIBCXXABI_SOURCE_DIR}/src/abort_message.cpp"
+"${LIBCXXABI_SOURCE_DIR}/src/cxa_demangle.cpp"
+"${LIBCXXABI_SOURCE_DIR}/src/cxa_exception.cpp"
+"${LIBCXXABI_SOURCE_DIR}/src/cxa_handlers.cpp"
+"${LIBCXXABI_SOURCE_DIR}/src/cxa_exception_storage.cpp"
+"${LIBCXXABI_SOURCE_DIR}/src/private_typeinfo.cpp"
+"${LIBCXXABI_SOURCE_DIR}/src/stdlib_typeinfo.cpp"
+"${LIBCXXABI_SOURCE_DIR}/src/cxa_aux_runtime.cpp"
+"${LIBCXXABI_SOURCE_DIR}/src/cxa_vector.cpp"
+"${LIBCXXABI_SOURCE_DIR}/src/stdlib_new_delete.cpp"
)

add_library(cxxabi ${SRCS})

diff --git a/contrib/libhdfs3-cmake/CMake/Options.cmake b/contrib/libhdfs3-cmake/CMake/Options.cmake
index d7ccc8b6475..04ab823eedc 100644
--- a/contrib/libhdfs3-cmake/CMake/Options.cmake
+++ b/contrib/libhdfs3-cmake/CMake/Options.cmake
@@ -22,7 +22,7 @@ ADD_DEFINITIONS(-D_GLIBCXX_USE_NANOSLEEP)
TRY_COMPILE(STRERROR_R_RETURN_INT
${CMAKE_CURRENT_BINARY_DIR}
- ${HDFS3_ROOT_DIR}/CMake/CMakeTestCompileStrerror.cpp
+ "${HDFS3_ROOT_DIR}/CMake/CMakeTestCompileStrerror.cpp"
CMAKE_FLAGS "-DCMAKE_CXX_LINK_EXECUTABLE='echo not linking now...'"
OUTPUT_VARIABLE OUTPUT)

@@ -36,13 +36,13 @@ ENDIF(STRERROR_R_RETURN_INT)
TRY_COMPILE(HAVE_STEADY_CLOCK
${CMAKE_CURRENT_BINARY_DIR}
- ${HDFS3_ROOT_DIR}/CMake/CMakeTestCompileSteadyClock.cpp
+ "${HDFS3_ROOT_DIR}/CMake/CMakeTestCompileSteadyClock.cpp"
CMAKE_FLAGS "-DCMAKE_CXX_LINK_EXECUTABLE='echo not linking now...'"
OUTPUT_VARIABLE OUTPUT)

TRY_COMPILE(HAVE_NESTED_EXCEPTION
${CMAKE_CURRENT_BINARY_DIR}
- ${HDFS3_ROOT_DIR}/CMake/CMakeTestCompileNestedException.cpp
+ "${HDFS3_ROOT_DIR}/CMake/CMakeTestCompileNestedException.cpp"
CMAKE_FLAGS "-DCMAKE_CXX_LINK_EXECUTABLE='echo not linking now...'"
OUTPUT_VARIABLE OUTPUT)

diff --git a/contrib/libhdfs3-cmake/CMakeLists.txt b/contrib/libhdfs3-cmake/CMakeLists.txt
index 60f4376bdea..c9b9179d5e6 100644
--- a/contrib/libhdfs3-cmake/CMakeLists.txt
+++ b/contrib/libhdfs3-cmake/CMakeLists.txt
@@ -24,9 +24,9 @@ else()
endif()

# project and source dir
-set(HDFS3_ROOT_DIR ${ClickHouse_SOURCE_DIR}/contrib/libhdfs3)
-set(HDFS3_SOURCE_DIR ${HDFS3_ROOT_DIR}/src)
-set(HDFS3_COMMON_DIR ${HDFS3_SOURCE_DIR}/common)
+set(HDFS3_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/libhdfs3")
+set(HDFS3_SOURCE_DIR "${HDFS3_ROOT_DIR}/src")
+set(HDFS3_COMMON_DIR "${HDFS3_SOURCE_DIR}/common")

# module
set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake" ${CMAKE_MODULE_PATH})

@@ -35,165 +35,165 @@ include(Options)

# source
set(PROTO_FILES
- #${HDFS3_SOURCE_DIR}/proto/encryption.proto
- ${HDFS3_SOURCE_DIR}/proto/ClientDatanodeProtocol.proto
- ${HDFS3_SOURCE_DIR}/proto/hdfs.proto
- ${HDFS3_SOURCE_DIR}/proto/Security.proto
- ${HDFS3_SOURCE_DIR}/proto/ProtobufRpcEngine.proto
- ${HDFS3_SOURCE_DIR}/proto/ClientNamenodeProtocol.proto
- ${HDFS3_SOURCE_DIR}/proto/IpcConnectionContext.proto
- ${HDFS3_SOURCE_DIR}/proto/RpcHeader.proto
- ${HDFS3_SOURCE_DIR}/proto/datatransfer.proto
+ #"${HDFS3_SOURCE_DIR}/proto/encryption.proto"
+ "${HDFS3_SOURCE_DIR}/proto/ClientDatanodeProtocol.proto"
+ "${HDFS3_SOURCE_DIR}/proto/hdfs.proto"
+ "${HDFS3_SOURCE_DIR}/proto/Security.proto"
+ "${HDFS3_SOURCE_DIR}/proto/ProtobufRpcEngine.proto"
+ "${HDFS3_SOURCE_DIR}/proto/ClientNamenodeProtocol.proto"
+ "${HDFS3_SOURCE_DIR}/proto/IpcConnectionContext.proto"
+ "${HDFS3_SOURCE_DIR}/proto/RpcHeader.proto"
+ "${HDFS3_SOURCE_DIR}/proto/datatransfer.proto"
)

if(USE_PROTOBUF)
PROTOBUF_GENERATE_CPP(PROTO_SOURCES PROTO_HEADERS ${PROTO_FILES})
endif()

-configure_file(${HDFS3_SOURCE_DIR}/platform.h.in ${CMAKE_CURRENT_BINARY_DIR}/platform.h)
+configure_file("${HDFS3_SOURCE_DIR}/platform.h.in" "${CMAKE_CURRENT_BINARY_DIR}/platform.h")

set(SRCS
- ${HDFS3_SOURCE_DIR}/network/TcpSocket.cpp
- ${HDFS3_SOURCE_DIR}/network/DomainSocket.cpp
- ${HDFS3_SOURCE_DIR}/network/BufferedSocketReader.cpp
- ${HDFS3_SOURCE_DIR}/client/ReadShortCircuitInfo.cpp
- ${HDFS3_SOURCE_DIR}/client/Pipeline.cpp
- ${HDFS3_SOURCE_DIR}/client/Hdfs.cpp
- ${HDFS3_SOURCE_DIR}/client/Packet.cpp
- ${HDFS3_SOURCE_DIR}/client/OutputStreamImpl.cpp
- ${HDFS3_SOURCE_DIR}/client/KerberosName.cpp
- ${HDFS3_SOURCE_DIR}/client/PacketHeader.cpp
- ${HDFS3_SOURCE_DIR}/client/LocalBlockReader.cpp
- ${HDFS3_SOURCE_DIR}/client/UserInfo.cpp
- ${HDFS3_SOURCE_DIR}/client/RemoteBlockReader.cpp
- ${HDFS3_SOURCE_DIR}/client/Permission.cpp
- ${HDFS3_SOURCE_DIR}/client/FileSystemImpl.cpp
- ${HDFS3_SOURCE_DIR}/client/DirectoryIterator.cpp
- ${HDFS3_SOURCE_DIR}/client/FileSystemKey.cpp
- ${HDFS3_SOURCE_DIR}/client/DataTransferProtocolSender.cpp
- ${HDFS3_SOURCE_DIR}/client/LeaseRenewer.cpp
- ${HDFS3_SOURCE_DIR}/client/PeerCache.cpp
- ${HDFS3_SOURCE_DIR}/client/InputStream.cpp
- ${HDFS3_SOURCE_DIR}/client/FileSystem.cpp
- ${HDFS3_SOURCE_DIR}/client/InputStreamImpl.cpp
- ${HDFS3_SOURCE_DIR}/client/Token.cpp
- ${HDFS3_SOURCE_DIR}/client/PacketPool.cpp
- ${HDFS3_SOURCE_DIR}/client/OutputStream.cpp
- ${HDFS3_SOURCE_DIR}/rpc/RpcChannelKey.cpp
- ${HDFS3_SOURCE_DIR}/rpc/RpcProtocolInfo.cpp
- ${HDFS3_SOURCE_DIR}/rpc/RpcClient.cpp
- ${HDFS3_SOURCE_DIR}/rpc/RpcRemoteCall.cpp
- ${HDFS3_SOURCE_DIR}/rpc/RpcChannel.cpp
- ${HDFS3_SOURCE_DIR}/rpc/RpcAuth.cpp
- ${HDFS3_SOURCE_DIR}/rpc/RpcContentWrapper.cpp
- ${HDFS3_SOURCE_DIR}/rpc/RpcConfig.cpp
- ${HDFS3_SOURCE_DIR}/rpc/RpcServerInfo.cpp
- ${HDFS3_SOURCE_DIR}/rpc/SaslClient.cpp
- ${HDFS3_SOURCE_DIR}/server/Datanode.cpp
- ${HDFS3_SOURCE_DIR}/server/LocatedBlocks.cpp
- ${HDFS3_SOURCE_DIR}/server/NamenodeProxy.cpp
- ${HDFS3_SOURCE_DIR}/server/NamenodeImpl.cpp
- ${HDFS3_SOURCE_DIR}/server/NamenodeInfo.cpp
- ${HDFS3_SOURCE_DIR}/common/WritableUtils.cpp
- ${HDFS3_SOURCE_DIR}/common/ExceptionInternal.cpp
- ${HDFS3_SOURCE_DIR}/common/SessionConfig.cpp
- ${HDFS3_SOURCE_DIR}/common/StackPrinter.cpp
- ${HDFS3_SOURCE_DIR}/common/Exception.cpp
- ${HDFS3_SOURCE_DIR}/common/Logger.cpp
- ${HDFS3_SOURCE_DIR}/common/CFileWrapper.cpp
- ${HDFS3_SOURCE_DIR}/common/XmlConfig.cpp
- ${HDFS3_SOURCE_DIR}/common/WriteBuffer.cpp
- ${HDFS3_SOURCE_DIR}/common/HWCrc32c.cpp
- ${HDFS3_SOURCE_DIR}/common/MappedFileWrapper.cpp
- ${HDFS3_SOURCE_DIR}/common/Hash.cpp
- ${HDFS3_SOURCE_DIR}/common/SWCrc32c.cpp
- ${HDFS3_SOURCE_DIR}/common/Thread.cpp
+ "${HDFS3_SOURCE_DIR}/network/TcpSocket.cpp"
+ "${HDFS3_SOURCE_DIR}/network/DomainSocket.cpp"
+ "${HDFS3_SOURCE_DIR}/network/BufferedSocketReader.cpp"
+ "${HDFS3_SOURCE_DIR}/client/ReadShortCircuitInfo.cpp"
+ "${HDFS3_SOURCE_DIR}/client/Pipeline.cpp"
+ "${HDFS3_SOURCE_DIR}/client/Hdfs.cpp"
+ "${HDFS3_SOURCE_DIR}/client/Packet.cpp"
+ "${HDFS3_SOURCE_DIR}/client/OutputStreamImpl.cpp"
+ "${HDFS3_SOURCE_DIR}/client/KerberosName.cpp"
+ "${HDFS3_SOURCE_DIR}/client/PacketHeader.cpp"
+ "${HDFS3_SOURCE_DIR}/client/LocalBlockReader.cpp"
+ "${HDFS3_SOURCE_DIR}/client/UserInfo.cpp"
+ "${HDFS3_SOURCE_DIR}/client/RemoteBlockReader.cpp"
+ "${HDFS3_SOURCE_DIR}/client/Permission.cpp"
+ "${HDFS3_SOURCE_DIR}/client/FileSystemImpl.cpp"
+ "${HDFS3_SOURCE_DIR}/client/DirectoryIterator.cpp"
+ "${HDFS3_SOURCE_DIR}/client/FileSystemKey.cpp"
+ "${HDFS3_SOURCE_DIR}/client/DataTransferProtocolSender.cpp"
+ "${HDFS3_SOURCE_DIR}/client/LeaseRenewer.cpp"
+ "${HDFS3_SOURCE_DIR}/client/PeerCache.cpp"
+ "${HDFS3_SOURCE_DIR}/client/InputStream.cpp"
+ "${HDFS3_SOURCE_DIR}/client/FileSystem.cpp"
+ "${HDFS3_SOURCE_DIR}/client/InputStreamImpl.cpp"
+ "${HDFS3_SOURCE_DIR}/client/Token.cpp"
+ "${HDFS3_SOURCE_DIR}/client/PacketPool.cpp"
+ "${HDFS3_SOURCE_DIR}/client/OutputStream.cpp"
+ "${HDFS3_SOURCE_DIR}/rpc/RpcChannelKey.cpp"
+ "${HDFS3_SOURCE_DIR}/rpc/RpcProtocolInfo.cpp"
+ "${HDFS3_SOURCE_DIR}/rpc/RpcClient.cpp"
+ "${HDFS3_SOURCE_DIR}/rpc/RpcRemoteCall.cpp"
+ "${HDFS3_SOURCE_DIR}/rpc/RpcChannel.cpp"
+ "${HDFS3_SOURCE_DIR}/rpc/RpcAuth.cpp"
+ "${HDFS3_SOURCE_DIR}/rpc/RpcContentWrapper.cpp"
+ "${HDFS3_SOURCE_DIR}/rpc/RpcConfig.cpp"
+ "${HDFS3_SOURCE_DIR}/rpc/RpcServerInfo.cpp"
+ "${HDFS3_SOURCE_DIR}/rpc/SaslClient.cpp"
+ "${HDFS3_SOURCE_DIR}/server/Datanode.cpp"
+ "${HDFS3_SOURCE_DIR}/server/LocatedBlocks.cpp"
+ "${HDFS3_SOURCE_DIR}/server/NamenodeProxy.cpp"
+ "${HDFS3_SOURCE_DIR}/server/NamenodeImpl.cpp"
+ "${HDFS3_SOURCE_DIR}/server/NamenodeInfo.cpp"
+ "${HDFS3_SOURCE_DIR}/common/WritableUtils.cpp"
+ "${HDFS3_SOURCE_DIR}/common/ExceptionInternal.cpp"
+ "${HDFS3_SOURCE_DIR}/common/SessionConfig.cpp"
+ "${HDFS3_SOURCE_DIR}/common/StackPrinter.cpp"
+ "${HDFS3_SOURCE_DIR}/common/Exception.cpp"
+ "${HDFS3_SOURCE_DIR}/common/Logger.cpp"
+ "${HDFS3_SOURCE_DIR}/common/CFileWrapper.cpp"
+ "${HDFS3_SOURCE_DIR}/common/XmlConfig.cpp"
+ "${HDFS3_SOURCE_DIR}/common/WriteBuffer.cpp"
+ "${HDFS3_SOURCE_DIR}/common/HWCrc32c.cpp"
+ "${HDFS3_SOURCE_DIR}/common/MappedFileWrapper.cpp"
+ "${HDFS3_SOURCE_DIR}/common/Hash.cpp"
+ "${HDFS3_SOURCE_DIR}/common/SWCrc32c.cpp"
+ "${HDFS3_SOURCE_DIR}/common/Thread.cpp"

- ${HDFS3_SOURCE_DIR}/network/TcpSocket.h
- ${HDFS3_SOURCE_DIR}/network/BufferedSocketReader.h
- ${HDFS3_SOURCE_DIR}/network/Socket.h
- ${HDFS3_SOURCE_DIR}/network/DomainSocket.h
- ${HDFS3_SOURCE_DIR}/network/Syscall.h
- ${HDFS3_SOURCE_DIR}/client/InputStreamImpl.h
- ${HDFS3_SOURCE_DIR}/client/FileSystem.h
- ${HDFS3_SOURCE_DIR}/client/ReadShortCircuitInfo.h
- ${HDFS3_SOURCE_DIR}/client/InputStreamInter.h
- ${HDFS3_SOURCE_DIR}/client/FileSystemImpl.h
- ${HDFS3_SOURCE_DIR}/client/PacketPool.h
- ${HDFS3_SOURCE_DIR}/client/Pipeline.h
- ${HDFS3_SOURCE_DIR}/client/OutputStreamInter.h
- ${HDFS3_SOURCE_DIR}/client/RemoteBlockReader.h
- ${HDFS3_SOURCE_DIR}/client/Token.h
- ${HDFS3_SOURCE_DIR}/client/KerberosName.h
- ${HDFS3_SOURCE_DIR}/client/DirectoryIterator.h
- ${HDFS3_SOURCE_DIR}/client/hdfs.h
- ${HDFS3_SOURCE_DIR}/client/FileSystemStats.h
- ${HDFS3_SOURCE_DIR}/client/FileSystemKey.h
- ${HDFS3_SOURCE_DIR}/client/DataTransferProtocolSender.h
- ${HDFS3_SOURCE_DIR}/client/Packet.h
- ${HDFS3_SOURCE_DIR}/client/PacketHeader.h
- ${HDFS3_SOURCE_DIR}/client/FileSystemInter.h
- ${HDFS3_SOURCE_DIR}/client/LocalBlockReader.h
- ${HDFS3_SOURCE_DIR}/client/TokenInternal.h
- ${HDFS3_SOURCE_DIR}/client/InputStream.h
- ${HDFS3_SOURCE_DIR}/client/PipelineAck.h
- ${HDFS3_SOURCE_DIR}/client/BlockReader.h
- ${HDFS3_SOURCE_DIR}/client/Permission.h
- ${HDFS3_SOURCE_DIR}/client/OutputStreamImpl.h
- ${HDFS3_SOURCE_DIR}/client/LeaseRenewer.h
- ${HDFS3_SOURCE_DIR}/client/UserInfo.h
- ${HDFS3_SOURCE_DIR}/client/PeerCache.h
- ${HDFS3_SOURCE_DIR}/client/OutputStream.h
- ${HDFS3_SOURCE_DIR}/client/FileStatus.h
- ${HDFS3_SOURCE_DIR}/client/DataTransferProtocol.h
- ${HDFS3_SOURCE_DIR}/client/BlockLocation.h
- ${HDFS3_SOURCE_DIR}/rpc/RpcConfig.h
- ${HDFS3_SOURCE_DIR}/rpc/SaslClient.h
- ${HDFS3_SOURCE_DIR}/rpc/RpcAuth.h
- ${HDFS3_SOURCE_DIR}/rpc/RpcClient.h
- ${HDFS3_SOURCE_DIR}/rpc/RpcCall.h
- ${HDFS3_SOURCE_DIR}/rpc/RpcContentWrapper.h
- ${HDFS3_SOURCE_DIR}/rpc/RpcProtocolInfo.h
- ${HDFS3_SOURCE_DIR}/rpc/RpcRemoteCall.h
- ${HDFS3_SOURCE_DIR}/rpc/RpcServerInfo.h
- ${HDFS3_SOURCE_DIR}/rpc/RpcChannel.h
- ${HDFS3_SOURCE_DIR}/rpc/RpcChannelKey.h
- ${HDFS3_SOURCE_DIR}/server/BlockLocalPathInfo.h
- ${HDFS3_SOURCE_DIR}/server/LocatedBlocks.h
- ${HDFS3_SOURCE_DIR}/server/DatanodeInfo.h
- ${HDFS3_SOURCE_DIR}/server/RpcHelper.h
- ${HDFS3_SOURCE_DIR}/server/ExtendedBlock.h
- ${HDFS3_SOURCE_DIR}/server/NamenodeInfo.h
- ${HDFS3_SOURCE_DIR}/server/NamenodeImpl.h
- ${HDFS3_SOURCE_DIR}/server/LocatedBlock.h
- ${HDFS3_SOURCE_DIR}/server/NamenodeProxy.h
- ${HDFS3_SOURCE_DIR}/server/Datanode.h
- ${HDFS3_SOURCE_DIR}/server/Namenode.h
- ${HDFS3_SOURCE_DIR}/common/XmlConfig.h
- ${HDFS3_SOURCE_DIR}/common/Logger.h
- ${HDFS3_SOURCE_DIR}/common/WriteBuffer.h
- ${HDFS3_SOURCE_DIR}/common/HWCrc32c.h
- ${HDFS3_SOURCE_DIR}/common/Checksum.h
- ${HDFS3_SOURCE_DIR}/common/SessionConfig.h
- ${HDFS3_SOURCE_DIR}/common/Unordered.h
- ${HDFS3_SOURCE_DIR}/common/BigEndian.h
- ${HDFS3_SOURCE_DIR}/common/Thread.h
- ${HDFS3_SOURCE_DIR}/common/StackPrinter.h
- ${HDFS3_SOURCE_DIR}/common/Exception.h
- ${HDFS3_SOURCE_DIR}/common/WritableUtils.h
- ${HDFS3_SOURCE_DIR}/common/StringUtil.h
- ${HDFS3_SOURCE_DIR}/common/LruMap.h
- ${HDFS3_SOURCE_DIR}/common/Function.h
- ${HDFS3_SOURCE_DIR}/common/DateTime.h
- ${HDFS3_SOURCE_DIR}/common/Hash.h
- ${HDFS3_SOURCE_DIR}/common/SWCrc32c.h
- ${HDFS3_SOURCE_DIR}/common/ExceptionInternal.h
- ${HDFS3_SOURCE_DIR}/common/Memory.h
- ${HDFS3_SOURCE_DIR}/common/FileWrapper.h
+ "${HDFS3_SOURCE_DIR}/network/TcpSocket.h"
+ "${HDFS3_SOURCE_DIR}/network/BufferedSocketReader.h"
+ "${HDFS3_SOURCE_DIR}/network/Socket.h"
+ "${HDFS3_SOURCE_DIR}/network/DomainSocket.h"
+ "${HDFS3_SOURCE_DIR}/network/Syscall.h"
+ "${HDFS3_SOURCE_DIR}/client/InputStreamImpl.h"
"${HDFS3_SOURCE_DIR}/client/FileSystem.h" + "${HDFS3_SOURCE_DIR}/client/ReadShortCircuitInfo.h" + "${HDFS3_SOURCE_DIR}/client/InputStreamInter.h" + "${HDFS3_SOURCE_DIR}/client/FileSystemImpl.h" + "${HDFS3_SOURCE_DIR}/client/PacketPool.h" + "${HDFS3_SOURCE_DIR}/client/Pipeline.h" + "${HDFS3_SOURCE_DIR}/client/OutputStreamInter.h" + "${HDFS3_SOURCE_DIR}/client/RemoteBlockReader.h" + "${HDFS3_SOURCE_DIR}/client/Token.h" + "${HDFS3_SOURCE_DIR}/client/KerberosName.h" + "${HDFS3_SOURCE_DIR}/client/DirectoryIterator.h" + "${HDFS3_SOURCE_DIR}/client/hdfs.h" + "${HDFS3_SOURCE_DIR}/client/FileSystemStats.h" + "${HDFS3_SOURCE_DIR}/client/FileSystemKey.h" + "${HDFS3_SOURCE_DIR}/client/DataTransferProtocolSender.h" + "${HDFS3_SOURCE_DIR}/client/Packet.h" + "${HDFS3_SOURCE_DIR}/client/PacketHeader.h" + "${HDFS3_SOURCE_DIR}/client/FileSystemInter.h" + "${HDFS3_SOURCE_DIR}/client/LocalBlockReader.h" + "${HDFS3_SOURCE_DIR}/client/TokenInternal.h" + "${HDFS3_SOURCE_DIR}/client/InputStream.h" + "${HDFS3_SOURCE_DIR}/client/PipelineAck.h" + "${HDFS3_SOURCE_DIR}/client/BlockReader.h" + "${HDFS3_SOURCE_DIR}/client/Permission.h" + "${HDFS3_SOURCE_DIR}/client/OutputStreamImpl.h" + "${HDFS3_SOURCE_DIR}/client/LeaseRenewer.h" + "${HDFS3_SOURCE_DIR}/client/UserInfo.h" + "${HDFS3_SOURCE_DIR}/client/PeerCache.h" + "${HDFS3_SOURCE_DIR}/client/OutputStream.h" + "${HDFS3_SOURCE_DIR}/client/FileStatus.h" + "${HDFS3_SOURCE_DIR}/client/DataTransferProtocol.h" + "${HDFS3_SOURCE_DIR}/client/BlockLocation.h" + "${HDFS3_SOURCE_DIR}/rpc/RpcConfig.h" + "${HDFS3_SOURCE_DIR}/rpc/SaslClient.h" + "${HDFS3_SOURCE_DIR}/rpc/RpcAuth.h" + "${HDFS3_SOURCE_DIR}/rpc/RpcClient.h" + "${HDFS3_SOURCE_DIR}/rpc/RpcCall.h" + "${HDFS3_SOURCE_DIR}/rpc/RpcContentWrapper.h" + "${HDFS3_SOURCE_DIR}/rpc/RpcProtocolInfo.h" + "${HDFS3_SOURCE_DIR}/rpc/RpcRemoteCall.h" + "${HDFS3_SOURCE_DIR}/rpc/RpcServerInfo.h" + "${HDFS3_SOURCE_DIR}/rpc/RpcChannel.h" + "${HDFS3_SOURCE_DIR}/rpc/RpcChannelKey.h" + "${HDFS3_SOURCE_DIR}/server/BlockLocalPathInfo.h" + "${HDFS3_SOURCE_DIR}/server/LocatedBlocks.h" + "${HDFS3_SOURCE_DIR}/server/DatanodeInfo.h" + "${HDFS3_SOURCE_DIR}/server/RpcHelper.h" + "${HDFS3_SOURCE_DIR}/server/ExtendedBlock.h" + "${HDFS3_SOURCE_DIR}/server/NamenodeInfo.h" + "${HDFS3_SOURCE_DIR}/server/NamenodeImpl.h" + "${HDFS3_SOURCE_DIR}/server/LocatedBlock.h" + "${HDFS3_SOURCE_DIR}/server/NamenodeProxy.h" + "${HDFS3_SOURCE_DIR}/server/Datanode.h" + "${HDFS3_SOURCE_DIR}/server/Namenode.h" + "${HDFS3_SOURCE_DIR}/common/XmlConfig.h" + "${HDFS3_SOURCE_DIR}/common/Logger.h" + "${HDFS3_SOURCE_DIR}/common/WriteBuffer.h" + "${HDFS3_SOURCE_DIR}/common/HWCrc32c.h" + "${HDFS3_SOURCE_DIR}/common/Checksum.h" + "${HDFS3_SOURCE_DIR}/common/SessionConfig.h" + "${HDFS3_SOURCE_DIR}/common/Unordered.h" + "${HDFS3_SOURCE_DIR}/common/BigEndian.h" + "${HDFS3_SOURCE_DIR}/common/Thread.h" + "${HDFS3_SOURCE_DIR}/common/StackPrinter.h" + "${HDFS3_SOURCE_DIR}/common/Exception.h" + "${HDFS3_SOURCE_DIR}/common/WritableUtils.h" + "${HDFS3_SOURCE_DIR}/common/StringUtil.h" + "${HDFS3_SOURCE_DIR}/common/LruMap.h" + "${HDFS3_SOURCE_DIR}/common/Function.h" + "${HDFS3_SOURCE_DIR}/common/DateTime.h" + "${HDFS3_SOURCE_DIR}/common/Hash.h" + "${HDFS3_SOURCE_DIR}/common/SWCrc32c.h" + "${HDFS3_SOURCE_DIR}/common/ExceptionInternal.h" + "${HDFS3_SOURCE_DIR}/common/Memory.h" + "${HDFS3_SOURCE_DIR}/common/FileWrapper.h" ) # old kernels (< 3.17) doesn't have SYS_getrandom. 
Always use POSIX implementation to have better compatibility -set_source_files_properties(${HDFS3_SOURCE_DIR}/rpc/RpcClient.cpp PROPERTIES COMPILE_FLAGS "-DBOOST_UUID_RANDOM_PROVIDER_FORCE_POSIX=1") +set_source_files_properties("${HDFS3_SOURCE_DIR}/rpc/RpcClient.cpp" PROPERTIES COMPILE_FLAGS "-DBOOST_UUID_RANDOM_PROVIDER_FORCE_POSIX=1") # target add_library(hdfs3 ${SRCS} ${PROTO_SOURCES} ${PROTO_HEADERS}) diff --git a/contrib/libpq-cmake/CMakeLists.txt b/contrib/libpq-cmake/CMakeLists.txt index 34c57799a8a..028fabe52b8 100644 --- a/contrib/libpq-cmake/CMakeLists.txt +++ b/contrib/libpq-cmake/CMakeLists.txt @@ -1,58 +1,58 @@ -set(LIBPQ_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libpq) +set(LIBPQ_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libpq") set(SRCS - ${LIBPQ_SOURCE_DIR}/fe-auth.c - ${LIBPQ_SOURCE_DIR}/fe-auth-scram.c - ${LIBPQ_SOURCE_DIR}/fe-connect.c - ${LIBPQ_SOURCE_DIR}/fe-exec.c - ${LIBPQ_SOURCE_DIR}/fe-lobj.c - ${LIBPQ_SOURCE_DIR}/fe-misc.c - ${LIBPQ_SOURCE_DIR}/fe-print.c - ${LIBPQ_SOURCE_DIR}/fe-protocol2.c - ${LIBPQ_SOURCE_DIR}/fe-protocol3.c - ${LIBPQ_SOURCE_DIR}/fe-secure.c - ${LIBPQ_SOURCE_DIR}/fe-secure-common.c - ${LIBPQ_SOURCE_DIR}/fe-secure-openssl.c - ${LIBPQ_SOURCE_DIR}/legacy-pqsignal.c - ${LIBPQ_SOURCE_DIR}/libpq-events.c - ${LIBPQ_SOURCE_DIR}/pqexpbuffer.c + "${LIBPQ_SOURCE_DIR}/fe-auth.c" + "${LIBPQ_SOURCE_DIR}/fe-auth-scram.c" + "${LIBPQ_SOURCE_DIR}/fe-connect.c" + "${LIBPQ_SOURCE_DIR}/fe-exec.c" + "${LIBPQ_SOURCE_DIR}/fe-lobj.c" + "${LIBPQ_SOURCE_DIR}/fe-misc.c" + "${LIBPQ_SOURCE_DIR}/fe-print.c" + "${LIBPQ_SOURCE_DIR}/fe-protocol2.c" + "${LIBPQ_SOURCE_DIR}/fe-protocol3.c" + "${LIBPQ_SOURCE_DIR}/fe-secure.c" + "${LIBPQ_SOURCE_DIR}/fe-secure-common.c" + "${LIBPQ_SOURCE_DIR}/fe-secure-openssl.c" + "${LIBPQ_SOURCE_DIR}/legacy-pqsignal.c" + "${LIBPQ_SOURCE_DIR}/libpq-events.c" + "${LIBPQ_SOURCE_DIR}/pqexpbuffer.c" - ${LIBPQ_SOURCE_DIR}/common/scram-common.c - ${LIBPQ_SOURCE_DIR}/common/sha2_openssl.c - ${LIBPQ_SOURCE_DIR}/common/md5.c - ${LIBPQ_SOURCE_DIR}/common/saslprep.c - ${LIBPQ_SOURCE_DIR}/common/unicode_norm.c - ${LIBPQ_SOURCE_DIR}/common/ip.c - ${LIBPQ_SOURCE_DIR}/common/jsonapi.c - ${LIBPQ_SOURCE_DIR}/common/wchar.c - ${LIBPQ_SOURCE_DIR}/common/base64.c - ${LIBPQ_SOURCE_DIR}/common/link-canary.c - ${LIBPQ_SOURCE_DIR}/common/fe_memutils.c - ${LIBPQ_SOURCE_DIR}/common/string.c - ${LIBPQ_SOURCE_DIR}/common/pg_get_line.c - ${LIBPQ_SOURCE_DIR}/common/stringinfo.c - ${LIBPQ_SOURCE_DIR}/common/psprintf.c - ${LIBPQ_SOURCE_DIR}/common/encnames.c - ${LIBPQ_SOURCE_DIR}/common/logging.c + "${LIBPQ_SOURCE_DIR}/common/scram-common.c" + "${LIBPQ_SOURCE_DIR}/common/sha2_openssl.c" + "${LIBPQ_SOURCE_DIR}/common/md5.c" + "${LIBPQ_SOURCE_DIR}/common/saslprep.c" + "${LIBPQ_SOURCE_DIR}/common/unicode_norm.c" + "${LIBPQ_SOURCE_DIR}/common/ip.c" + "${LIBPQ_SOURCE_DIR}/common/jsonapi.c" + "${LIBPQ_SOURCE_DIR}/common/wchar.c" + "${LIBPQ_SOURCE_DIR}/common/base64.c" + "${LIBPQ_SOURCE_DIR}/common/link-canary.c" + "${LIBPQ_SOURCE_DIR}/common/fe_memutils.c" + "${LIBPQ_SOURCE_DIR}/common/string.c" + "${LIBPQ_SOURCE_DIR}/common/pg_get_line.c" + "${LIBPQ_SOURCE_DIR}/common/stringinfo.c" + "${LIBPQ_SOURCE_DIR}/common/psprintf.c" + "${LIBPQ_SOURCE_DIR}/common/encnames.c" + "${LIBPQ_SOURCE_DIR}/common/logging.c" - ${LIBPQ_SOURCE_DIR}/port/snprintf.c - ${LIBPQ_SOURCE_DIR}/port/strlcpy.c - ${LIBPQ_SOURCE_DIR}/port/strerror.c - ${LIBPQ_SOURCE_DIR}/port/inet_net_ntop.c - ${LIBPQ_SOURCE_DIR}/port/getpeereid.c - ${LIBPQ_SOURCE_DIR}/port/chklocale.c - ${LIBPQ_SOURCE_DIR}/port/noblock.c - 
${LIBPQ_SOURCE_DIR}/port/pg_strong_random.c - ${LIBPQ_SOURCE_DIR}/port/pgstrcasecmp.c - ${LIBPQ_SOURCE_DIR}/port/thread.c - ${LIBPQ_SOURCE_DIR}/port/path.c - ${LIBPQ_SOURCE_DIR}/port/explicit_bzero.c + "${LIBPQ_SOURCE_DIR}/port/snprintf.c" + "${LIBPQ_SOURCE_DIR}/port/strlcpy.c" + "${LIBPQ_SOURCE_DIR}/port/strerror.c" + "${LIBPQ_SOURCE_DIR}/port/inet_net_ntop.c" + "${LIBPQ_SOURCE_DIR}/port/getpeereid.c" + "${LIBPQ_SOURCE_DIR}/port/chklocale.c" + "${LIBPQ_SOURCE_DIR}/port/noblock.c" + "${LIBPQ_SOURCE_DIR}/port/pg_strong_random.c" + "${LIBPQ_SOURCE_DIR}/port/pgstrcasecmp.c" + "${LIBPQ_SOURCE_DIR}/port/thread.c" + "${LIBPQ_SOURCE_DIR}/port/path.c" + "${LIBPQ_SOURCE_DIR}/port/explicit_bzero.c" ) add_library(libpq ${SRCS}) target_include_directories (libpq PUBLIC ${LIBPQ_SOURCE_DIR}) -target_include_directories (libpq PUBLIC ${LIBPQ_SOURCE_DIR}/include) -target_include_directories (libpq PRIVATE ${LIBPQ_SOURCE_DIR}/configs) +target_include_directories (libpq PUBLIC "${LIBPQ_SOURCE_DIR}/include") +target_include_directories (libpq PRIVATE "${LIBPQ_SOURCE_DIR}/configs") target_link_libraries (libpq PRIVATE ssl) diff --git a/contrib/libpqxx-cmake/CMakeLists.txt b/contrib/libpqxx-cmake/CMakeLists.txt index ed372951f82..4edef7bdd82 100644 --- a/contrib/libpqxx-cmake/CMakeLists.txt +++ b/contrib/libpqxx-cmake/CMakeLists.txt @@ -1,70 +1,70 @@ -set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/libpqxx) +set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/libpqxx") set (SRCS - ${LIBRARY_DIR}/src/strconv.cxx - ${LIBRARY_DIR}/src/array.cxx - ${LIBRARY_DIR}/src/binarystring.cxx - ${LIBRARY_DIR}/src/connection.cxx - ${LIBRARY_DIR}/src/cursor.cxx - ${LIBRARY_DIR}/src/encodings.cxx - ${LIBRARY_DIR}/src/errorhandler.cxx - ${LIBRARY_DIR}/src/except.cxx - ${LIBRARY_DIR}/src/field.cxx - ${LIBRARY_DIR}/src/largeobject.cxx - ${LIBRARY_DIR}/src/notification.cxx - ${LIBRARY_DIR}/src/pipeline.cxx - ${LIBRARY_DIR}/src/result.cxx - ${LIBRARY_DIR}/src/robusttransaction.cxx - ${LIBRARY_DIR}/src/sql_cursor.cxx - ${LIBRARY_DIR}/src/stream_from.cxx - ${LIBRARY_DIR}/src/stream_to.cxx - ${LIBRARY_DIR}/src/subtransaction.cxx - ${LIBRARY_DIR}/src/transaction.cxx - ${LIBRARY_DIR}/src/transaction_base.cxx - ${LIBRARY_DIR}/src/row.cxx - ${LIBRARY_DIR}/src/util.cxx - ${LIBRARY_DIR}/src/version.cxx + "${LIBRARY_DIR}/src/strconv.cxx" + "${LIBRARY_DIR}/src/array.cxx" + "${LIBRARY_DIR}/src/binarystring.cxx" + "${LIBRARY_DIR}/src/connection.cxx" + "${LIBRARY_DIR}/src/cursor.cxx" + "${LIBRARY_DIR}/src/encodings.cxx" + "${LIBRARY_DIR}/src/errorhandler.cxx" + "${LIBRARY_DIR}/src/except.cxx" + "${LIBRARY_DIR}/src/field.cxx" + "${LIBRARY_DIR}/src/largeobject.cxx" + "${LIBRARY_DIR}/src/notification.cxx" + "${LIBRARY_DIR}/src/pipeline.cxx" + "${LIBRARY_DIR}/src/result.cxx" + "${LIBRARY_DIR}/src/robusttransaction.cxx" + "${LIBRARY_DIR}/src/sql_cursor.cxx" + "${LIBRARY_DIR}/src/stream_from.cxx" + "${LIBRARY_DIR}/src/stream_to.cxx" + "${LIBRARY_DIR}/src/subtransaction.cxx" + "${LIBRARY_DIR}/src/transaction.cxx" + "${LIBRARY_DIR}/src/transaction_base.cxx" + "${LIBRARY_DIR}/src/row.cxx" + "${LIBRARY_DIR}/src/util.cxx" + "${LIBRARY_DIR}/src/version.cxx" ) # Need to explicitly include each header file, because in the directory include/pqxx there are also files # like just 'array'. So if including the whole directory with `target_include_directories`, it will make # conflicts with all includes of <array>.
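The comment above names the real hazard: libpqxx ships extension-less forwarding headers in `include/pqxx` (files literally called `array`, `connection`, and so on), so that directory itself must stay off the header search path. A minimal sketch of the collision, not part of the PR (the `WRONG` line is hypothetical; the `RIGHT` line is what this file actually does):

```cmake
# WRONG (hypothetical): with include/pqxx itself on the search path, a
# `#include <array>` in any code inheriting these include directories would
# resolve to pqxx's extension-less 'array' file instead of the C++ standard
# header.
# target_include_directories(libpqxx PRIVATE "${LIBRARY_DIR}/include/pqxx")

# RIGHT (what this file does): expose only the parent directory, so pqxx
# headers are spelled #include <pqxx/array> and cannot collide; the .hxx
# files are then listed one by one, as in the HDRS block that follows.
target_include_directories(libpqxx PRIVATE "${LIBRARY_DIR}/include")
```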
set (HDRS - ${LIBRARY_DIR}/include/pqxx/array.hxx - ${LIBRARY_DIR}/include/pqxx/binarystring.hxx - ${LIBRARY_DIR}/include/pqxx/composite.hxx - ${LIBRARY_DIR}/include/pqxx/connection.hxx - ${LIBRARY_DIR}/include/pqxx/cursor.hxx - ${LIBRARY_DIR}/include/pqxx/dbtransaction.hxx - ${LIBRARY_DIR}/include/pqxx/errorhandler.hxx - ${LIBRARY_DIR}/include/pqxx/except.hxx - ${LIBRARY_DIR}/include/pqxx/field.hxx - ${LIBRARY_DIR}/include/pqxx/isolation.hxx - ${LIBRARY_DIR}/include/pqxx/largeobject.hxx - ${LIBRARY_DIR}/include/pqxx/nontransaction.hxx - ${LIBRARY_DIR}/include/pqxx/notification.hxx - ${LIBRARY_DIR}/include/pqxx/pipeline.hxx - ${LIBRARY_DIR}/include/pqxx/prepared_statement.hxx - ${LIBRARY_DIR}/include/pqxx/result.hxx - ${LIBRARY_DIR}/include/pqxx/robusttransaction.hxx - ${LIBRARY_DIR}/include/pqxx/row.hxx - ${LIBRARY_DIR}/include/pqxx/separated_list.hxx - ${LIBRARY_DIR}/include/pqxx/strconv.hxx - ${LIBRARY_DIR}/include/pqxx/stream_from.hxx - ${LIBRARY_DIR}/include/pqxx/stream_to.hxx - ${LIBRARY_DIR}/include/pqxx/subtransaction.hxx - ${LIBRARY_DIR}/include/pqxx/transaction.hxx - ${LIBRARY_DIR}/include/pqxx/transaction_base.hxx - ${LIBRARY_DIR}/include/pqxx/types.hxx - ${LIBRARY_DIR}/include/pqxx/util.hxx - ${LIBRARY_DIR}/include/pqxx/version.hxx - ${LIBRARY_DIR}/include/pqxx/zview.hxx + "${LIBRARY_DIR}/include/pqxx/array.hxx" + "${LIBRARY_DIR}/include/pqxx/binarystring.hxx" + "${LIBRARY_DIR}/include/pqxx/composite.hxx" + "${LIBRARY_DIR}/include/pqxx/connection.hxx" + "${LIBRARY_DIR}/include/pqxx/cursor.hxx" + "${LIBRARY_DIR}/include/pqxx/dbtransaction.hxx" + "${LIBRARY_DIR}/include/pqxx/errorhandler.hxx" + "${LIBRARY_DIR}/include/pqxx/except.hxx" + "${LIBRARY_DIR}/include/pqxx/field.hxx" + "${LIBRARY_DIR}/include/pqxx/isolation.hxx" + "${LIBRARY_DIR}/include/pqxx/largeobject.hxx" + "${LIBRARY_DIR}/include/pqxx/nontransaction.hxx" + "${LIBRARY_DIR}/include/pqxx/notification.hxx" + "${LIBRARY_DIR}/include/pqxx/pipeline.hxx" + "${LIBRARY_DIR}/include/pqxx/prepared_statement.hxx" + "${LIBRARY_DIR}/include/pqxx/result.hxx" + "${LIBRARY_DIR}/include/pqxx/robusttransaction.hxx" + "${LIBRARY_DIR}/include/pqxx/row.hxx" + "${LIBRARY_DIR}/include/pqxx/separated_list.hxx" + "${LIBRARY_DIR}/include/pqxx/strconv.hxx" + "${LIBRARY_DIR}/include/pqxx/stream_from.hxx" + "${LIBRARY_DIR}/include/pqxx/stream_to.hxx" + "${LIBRARY_DIR}/include/pqxx/subtransaction.hxx" + "${LIBRARY_DIR}/include/pqxx/transaction.hxx" + "${LIBRARY_DIR}/include/pqxx/transaction_base.hxx" + "${LIBRARY_DIR}/include/pqxx/types.hxx" + "${LIBRARY_DIR}/include/pqxx/util.hxx" + "${LIBRARY_DIR}/include/pqxx/version.hxx" + "${LIBRARY_DIR}/include/pqxx/zview.hxx" ) add_library(libpqxx ${SRCS} ${HDRS}) target_link_libraries(libpqxx PUBLIC ${LIBPQ_LIBRARY}) -target_include_directories (libpqxx PRIVATE ${LIBRARY_DIR}/include) +target_include_directories (libpqxx PRIVATE "${LIBRARY_DIR}/include") # crutch set(CM_CONFIG_H_IN "${LIBRARY_DIR}/include/pqxx/config.h.in") diff --git a/contrib/librdkafka b/contrib/librdkafka index cf11d0aa36d..43491d33ca2 160000 --- a/contrib/librdkafka +++ b/contrib/librdkafka @@ -1 +1 @@ -Subproject commit cf11d0aa36d4738f2c9bf4377807661660f1be76 +Subproject commit 43491d33ca2826531d1e3cae70d4bf1e5249e3c9 diff --git a/contrib/librdkafka-cmake/CMakeLists.txt b/contrib/librdkafka-cmake/CMakeLists.txt index 2b55b22cd2b..97b6a7e1ec5 100644 --- a/contrib/librdkafka-cmake/CMakeLists.txt +++ b/contrib/librdkafka-cmake/CMakeLists.txt @@ -1,83 +1,83 @@ -set(RDKAFKA_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/librdkafka/src) 
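The `-`/`+` pair around this note is the whole PR in miniature: the same mechanical edit, wrapping `${...}` expansions in double quotes, is applied to every contrib CMake file. A minimal sketch (paths hypothetical) of what the quoting defends against; at the CMake language level an unquoted expansion splits on semicolons, while spaces bite wherever the value is re-interpreted, for example inside a shell command string:

```cmake
set(DIR "/tmp/a;b")
# if(EXISTS ${DIR}/foo.c)   -- if() receives two path arguments and errors out
if(EXISTS "${DIR}/foo.c")   # one argument: the literal path /tmp/a;b/foo.c
    message(STATUS "found")
endif()

# Values embedded in shell strings split on spaces unless quoted there too
# (compare the mkversion() fix in openldap-cmake later in this diff):
set(OUT "/tmp/build dir/gen.c")
# COMMAND bash -c "gen.sh > ${OUT}"      -- the shell redirects to /tmp/build
# COMMAND bash -c "gen.sh > \"${OUT}\""  -- quoted inside the string; works
```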
+set(RDKAFKA_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/librdkafka/src") set(SRCS - ${RDKAFKA_SOURCE_DIR}/crc32c.c -# ${RDKAFKA_SOURCE_DIR}/lz4.c -# ${RDKAFKA_SOURCE_DIR}/lz4frame.c -# ${RDKAFKA_SOURCE_DIR}/lz4hc.c - ${RDKAFKA_SOURCE_DIR}/rdaddr.c - ${RDKAFKA_SOURCE_DIR}/rdavl.c - ${RDKAFKA_SOURCE_DIR}/rdbuf.c - ${RDKAFKA_SOURCE_DIR}/rdcrc32.c - ${RDKAFKA_SOURCE_DIR}/rddl.c - ${RDKAFKA_SOURCE_DIR}/rdfnv1a.c - ${RDKAFKA_SOURCE_DIR}/rdgz.c - ${RDKAFKA_SOURCE_DIR}/rdhdrhistogram.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_admin.c # looks optional - ${RDKAFKA_SOURCE_DIR}/rdkafka_assignment.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_assignor.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_aux.c # looks optional - ${RDKAFKA_SOURCE_DIR}/rdkafka_background.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_broker.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_buf.c - ${RDKAFKA_SOURCE_DIR}/rdkafka.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_cert.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_cgrp.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_conf.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_coord.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_error.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_event.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_feature.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_header.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_idempotence.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_interceptor.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_lz4.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_metadata.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_metadata_cache.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_mock.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_mock_cgrp.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_mock_handlers.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_msg.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_msgset_reader.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_msgset_writer.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_offset.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_op.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_partition.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_pattern.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_plugin.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_queue.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_range_assignor.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_request.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_roundrobin_assignor.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl.c -# ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_cyrus.c # optionally included below -# ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_oauthbearer.c # optionally included below - ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_plain.c -# ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_scram.c # optionally included below -# ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_win32.c -# ${RDKAFKA_SOURCE_DIR}/rdkafka_ssl.c # optionally included below - ${RDKAFKA_SOURCE_DIR}/rdkafka_sticky_assignor.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_subscription.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_timer.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_topic.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_transport.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_txnmgr.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_zstd.c - ${RDKAFKA_SOURCE_DIR}/rdlist.c - ${RDKAFKA_SOURCE_DIR}/rdlog.c - ${RDKAFKA_SOURCE_DIR}/rdmap.c - ${RDKAFKA_SOURCE_DIR}/rdmurmur2.c - ${RDKAFKA_SOURCE_DIR}/rdports.c - ${RDKAFKA_SOURCE_DIR}/rdrand.c - ${RDKAFKA_SOURCE_DIR}/rdregex.c - ${RDKAFKA_SOURCE_DIR}/rdstring.c - ${RDKAFKA_SOURCE_DIR}/rdunittest.c - ${RDKAFKA_SOURCE_DIR}/rdvarint.c - ${RDKAFKA_SOURCE_DIR}/rdxxhash.c - # ${RDKAFKA_SOURCE_DIR}/regexp.c - ${RDKAFKA_SOURCE_DIR}/snappy.c - ${RDKAFKA_SOURCE_DIR}/tinycthread.c - ${RDKAFKA_SOURCE_DIR}/tinycthread_extra.c + "${RDKAFKA_SOURCE_DIR}/crc32c.c" +# "${RDKAFKA_SOURCE_DIR}/lz4.c" +# "${RDKAFKA_SOURCE_DIR}/lz4frame.c" +# "${RDKAFKA_SOURCE_DIR}/lz4hc.c" + "${RDKAFKA_SOURCE_DIR}/rdaddr.c" + "${RDKAFKA_SOURCE_DIR}/rdavl.c" + "${RDKAFKA_SOURCE_DIR}/rdbuf.c" + 
"${RDKAFKA_SOURCE_DIR}/rdcrc32.c" + "${RDKAFKA_SOURCE_DIR}/rddl.c" + "${RDKAFKA_SOURCE_DIR}/rdfnv1a.c" + "${RDKAFKA_SOURCE_DIR}/rdgz.c" + "${RDKAFKA_SOURCE_DIR}/rdhdrhistogram.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_admin.c" # looks optional + "${RDKAFKA_SOURCE_DIR}/rdkafka_assignment.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_assignor.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_aux.c" # looks optional + "${RDKAFKA_SOURCE_DIR}/rdkafka_background.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_broker.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_buf.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_cert.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_cgrp.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_conf.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_coord.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_error.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_event.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_feature.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_header.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_idempotence.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_interceptor.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_lz4.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_metadata.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_metadata_cache.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_mock.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_mock_cgrp.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_mock_handlers.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_msg.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_msgset_reader.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_msgset_writer.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_offset.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_op.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_partition.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_pattern.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_plugin.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_queue.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_range_assignor.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_request.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_roundrobin_assignor.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_sasl.c" +# "${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_cyrus.c" # optionally included below +# "${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_oauthbearer.c" # optionally included below + "${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_plain.c" +# "${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_scram.c" # optionally included below +# "${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_win32.c" +# "${RDKAFKA_SOURCE_DIR}/rdkafka_ssl.c" # optionally included below + "${RDKAFKA_SOURCE_DIR}/rdkafka_sticky_assignor.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_subscription.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_timer.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_topic.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_transport.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_txnmgr.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_zstd.c" + "${RDKAFKA_SOURCE_DIR}/rdlist.c" + "${RDKAFKA_SOURCE_DIR}/rdlog.c" + "${RDKAFKA_SOURCE_DIR}/rdmap.c" + "${RDKAFKA_SOURCE_DIR}/rdmurmur2.c" + "${RDKAFKA_SOURCE_DIR}/rdports.c" + "${RDKAFKA_SOURCE_DIR}/rdrand.c" + "${RDKAFKA_SOURCE_DIR}/rdregex.c" + "${RDKAFKA_SOURCE_DIR}/rdstring.c" + "${RDKAFKA_SOURCE_DIR}/rdunittest.c" + "${RDKAFKA_SOURCE_DIR}/rdvarint.c" + "${RDKAFKA_SOURCE_DIR}/rdxxhash.c" + # "${RDKAFKA_SOURCE_DIR}/regexp.c" + "${RDKAFKA_SOURCE_DIR}/snappy.c" + "${RDKAFKA_SOURCE_DIR}/tinycthread.c" + "${RDKAFKA_SOURCE_DIR}/tinycthread_extra.c" ) if(${ENABLE_CYRUS_SASL}) @@ -96,28 +96,28 @@ if(OPENSSL_FOUND) endif() if(WITH_SSL) - list(APPEND SRCS ${RDKAFKA_SOURCE_DIR}/rdkafka_ssl.c) + list(APPEND SRCS "${RDKAFKA_SOURCE_DIR}/rdkafka_ssl.c") endif() if(WITH_SASL_CYRUS) - list(APPEND SRCS ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_cyrus.c) # needed to support Kerberos, requires cyrus-sasl + list(APPEND SRCS "${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_cyrus.c") # needed to support Kerberos, 
requires cyrus-sasl endif() if(WITH_SASL_SCRAM) - list(APPEND SRCS ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_scram.c) + list(APPEND SRCS "${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_scram.c") endif() if(WITH_SASL_OAUTHBEARER) - list(APPEND SRCS ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_oauthbearer.c) + list(APPEND SRCS "${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_oauthbearer.c") endif() add_library(rdkafka ${SRCS}) target_compile_options(rdkafka PRIVATE -fno-sanitize=undefined) # target_include_directories(rdkafka SYSTEM PUBLIC include) -target_include_directories(rdkafka SYSTEM PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include) # for "librdkafka/rdkafka.h" +target_include_directories(rdkafka SYSTEM PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include") # for "librdkafka/rdkafka.h" target_include_directories(rdkafka SYSTEM PUBLIC ${RDKAFKA_SOURCE_DIR}) # Because weird logic with "include_next" is used. -target_include_directories(rdkafka SYSTEM PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/auxdir) # for "../config.h" -target_include_directories(rdkafka SYSTEM PRIVATE ${ZSTD_INCLUDE_DIR}/common) # Because wrong path to "zstd_errors.h" is used. +target_include_directories(rdkafka SYSTEM PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/auxdir") # for "../config.h" +target_include_directories(rdkafka SYSTEM PRIVATE "${ZSTD_INCLUDE_DIR}/common") # Because wrong path to "zstd_errors.h" is used. target_link_libraries(rdkafka PRIVATE lz4 ${ZLIB_LIBRARIES} ${ZSTD_LIBRARY}) if(OPENSSL_SSL_LIBRARY AND OPENSSL_CRYPTO_LIBRARY) target_link_libraries(rdkafka PRIVATE ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY}) @@ -126,7 +126,7 @@ if(${ENABLE_CYRUS_SASL}) target_link_libraries(rdkafka PRIVATE ${CYRUS_SASL_LIBRARY}) endif() -file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/auxdir) +file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/auxdir") configure_file( "${CMAKE_CURRENT_SOURCE_DIR}/config.h.in" diff --git a/contrib/librdkafka-cmake/config.h.in b/contrib/librdkafka-cmake/config.h.in index 80b6ea61b6e..9fecb45e42d 100644 --- a/contrib/librdkafka-cmake/config.h.in +++ b/contrib/librdkafka-cmake/config.h.in @@ -66,7 +66,7 @@ #cmakedefine WITH_SASL_OAUTHBEARER 1 #cmakedefine WITH_SASL_CYRUS 1 // crc32chw -#if !defined(__PPC__) && (!defined(__aarch64__) || defined(__ARM_FEATURE_CRC32)) +#if !defined(__PPC__) && (!defined(__aarch64__) || defined(__ARM_FEATURE_CRC32)) && !(defined(__aarch64__) && defined(__APPLE__)) #define WITH_CRC32C_HW 1 #endif // regex @@ -75,6 +75,8 @@ #define HAVE_STRNDUP 1 // strerror_r #define HAVE_STRERROR_R 1 +// rand_r +#define HAVE_RAND_R 1 #ifdef __APPLE__ // pthread_setname_np diff --git a/contrib/libunwind b/contrib/libunwind index 8fe25d7dc70..53734f420f1 160000 --- a/contrib/libunwind +++ b/contrib/libunwind @@ -1 +1 @@ -Subproject commit 8fe25d7dc70f2a4ea38c3e5a33fa9d4199b67a5a +Subproject commit 53734f420f166e1ca2732dec8998469bfbb7731d diff --git a/contrib/libunwind-cmake/CMakeLists.txt b/contrib/libunwind-cmake/CMakeLists.txt index 3afff30eee7..1a9f5e50abd 100644 --- a/contrib/libunwind-cmake/CMakeLists.txt +++ b/contrib/libunwind-cmake/CMakeLists.txt @@ -1,27 +1,27 @@ include(CheckCCompilerFlag) include(CheckCXXCompilerFlag) -set(LIBUNWIND_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libunwind) +set(LIBUNWIND_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libunwind") set(LIBUNWIND_CXX_SOURCES - ${LIBUNWIND_SOURCE_DIR}/src/libunwind.cpp - ${LIBUNWIND_SOURCE_DIR}/src/Unwind-EHABI.cpp - ${LIBUNWIND_SOURCE_DIR}/src/Unwind-seh.cpp) + "${LIBUNWIND_SOURCE_DIR}/src/libunwind.cpp" + "${LIBUNWIND_SOURCE_DIR}/src/Unwind-EHABI.cpp" + 
"${LIBUNWIND_SOURCE_DIR}/src/Unwind-seh.cpp") if (APPLE) - set(LIBUNWIND_CXX_SOURCES ${LIBUNWIND_CXX_SOURCES} ${LIBUNWIND_SOURCE_DIR}/src/Unwind_AppleExtras.cpp) + set(LIBUNWIND_CXX_SOURCES ${LIBUNWIND_CXX_SOURCES} "${LIBUNWIND_SOURCE_DIR}/src/Unwind_AppleExtras.cpp") endif () set(LIBUNWIND_C_SOURCES - ${LIBUNWIND_SOURCE_DIR}/src/UnwindLevel1.c - ${LIBUNWIND_SOURCE_DIR}/src/UnwindLevel1-gcc-ext.c - ${LIBUNWIND_SOURCE_DIR}/src/Unwind-sjlj.c + "${LIBUNWIND_SOURCE_DIR}/src/UnwindLevel1.c" + "${LIBUNWIND_SOURCE_DIR}/src/UnwindLevel1-gcc-ext.c" + "${LIBUNWIND_SOURCE_DIR}/src/Unwind-sjlj.c" # Use unw_backtrace to override libgcc's backtrace symbol for better ABI compatibility unwind-override.c) set_source_files_properties(${LIBUNWIND_C_SOURCES} PROPERTIES COMPILE_FLAGS "-std=c99") set(LIBUNWIND_ASM_SOURCES - ${LIBUNWIND_SOURCE_DIR}/src/UnwindRegistersRestore.S - ${LIBUNWIND_SOURCE_DIR}/src/UnwindRegistersSave.S) + "${LIBUNWIND_SOURCE_DIR}/src/UnwindRegistersRestore.S" + "${LIBUNWIND_SOURCE_DIR}/src/UnwindRegistersSave.S") # CMake doesn't pass the correct architecture for Apple prior to CMake 3.19 [1] # Workaround these two issues by compiling as C. diff --git a/contrib/libxml2-cmake/CMakeLists.txt b/contrib/libxml2-cmake/CMakeLists.txt index 068662c7213..8fda0399ea3 100644 --- a/contrib/libxml2-cmake/CMakeLists.txt +++ b/contrib/libxml2-cmake/CMakeLists.txt @@ -1,54 +1,54 @@ -set(LIBXML2_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libxml2) -set(LIBXML2_BINARY_DIR ${ClickHouse_BINARY_DIR}/contrib/libxml2) +set(LIBXML2_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libxml2") +set(LIBXML2_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/libxml2") set(SRCS - ${LIBXML2_SOURCE_DIR}/SAX.c - ${LIBXML2_SOURCE_DIR}/entities.c - ${LIBXML2_SOURCE_DIR}/encoding.c - ${LIBXML2_SOURCE_DIR}/error.c - ${LIBXML2_SOURCE_DIR}/parserInternals.c - ${LIBXML2_SOURCE_DIR}/parser.c - ${LIBXML2_SOURCE_DIR}/tree.c - ${LIBXML2_SOURCE_DIR}/hash.c - ${LIBXML2_SOURCE_DIR}/list.c - ${LIBXML2_SOURCE_DIR}/xmlIO.c - ${LIBXML2_SOURCE_DIR}/xmlmemory.c - ${LIBXML2_SOURCE_DIR}/uri.c - ${LIBXML2_SOURCE_DIR}/valid.c - ${LIBXML2_SOURCE_DIR}/xlink.c - ${LIBXML2_SOURCE_DIR}/HTMLparser.c - ${LIBXML2_SOURCE_DIR}/HTMLtree.c - ${LIBXML2_SOURCE_DIR}/debugXML.c - ${LIBXML2_SOURCE_DIR}/xpath.c - ${LIBXML2_SOURCE_DIR}/xpointer.c - ${LIBXML2_SOURCE_DIR}/xinclude.c - ${LIBXML2_SOURCE_DIR}/nanohttp.c - ${LIBXML2_SOURCE_DIR}/nanoftp.c - ${LIBXML2_SOURCE_DIR}/DOCBparser.c - ${LIBXML2_SOURCE_DIR}/catalog.c - ${LIBXML2_SOURCE_DIR}/globals.c - ${LIBXML2_SOURCE_DIR}/threads.c - ${LIBXML2_SOURCE_DIR}/c14n.c - ${LIBXML2_SOURCE_DIR}/xmlstring.c - ${LIBXML2_SOURCE_DIR}/buf.c - ${LIBXML2_SOURCE_DIR}/xmlregexp.c - ${LIBXML2_SOURCE_DIR}/xmlschemas.c - ${LIBXML2_SOURCE_DIR}/xmlschemastypes.c - ${LIBXML2_SOURCE_DIR}/xmlunicode.c - ${LIBXML2_SOURCE_DIR}/triostr.c - #${LIBXML2_SOURCE_DIR}/trio.c - ${LIBXML2_SOURCE_DIR}/xmlreader.c - ${LIBXML2_SOURCE_DIR}/relaxng.c - ${LIBXML2_SOURCE_DIR}/dict.c - ${LIBXML2_SOURCE_DIR}/SAX2.c - ${LIBXML2_SOURCE_DIR}/xmlwriter.c - ${LIBXML2_SOURCE_DIR}/legacy.c - ${LIBXML2_SOURCE_DIR}/chvalid.c - ${LIBXML2_SOURCE_DIR}/pattern.c - ${LIBXML2_SOURCE_DIR}/xmlsave.c - ${LIBXML2_SOURCE_DIR}/xmlmodule.c - ${LIBXML2_SOURCE_DIR}/schematron.c - ${LIBXML2_SOURCE_DIR}/xzlib.c + "${LIBXML2_SOURCE_DIR}/SAX.c" + "${LIBXML2_SOURCE_DIR}/entities.c" + "${LIBXML2_SOURCE_DIR}/encoding.c" + "${LIBXML2_SOURCE_DIR}/error.c" + "${LIBXML2_SOURCE_DIR}/parserInternals.c" + "${LIBXML2_SOURCE_DIR}/parser.c" + "${LIBXML2_SOURCE_DIR}/tree.c" + 
"${LIBXML2_SOURCE_DIR}/hash.c" + "${LIBXML2_SOURCE_DIR}/list.c" + "${LIBXML2_SOURCE_DIR}/xmlIO.c" + "${LIBXML2_SOURCE_DIR}/xmlmemory.c" + "${LIBXML2_SOURCE_DIR}/uri.c" + "${LIBXML2_SOURCE_DIR}/valid.c" + "${LIBXML2_SOURCE_DIR}/xlink.c" + "${LIBXML2_SOURCE_DIR}/HTMLparser.c" + "${LIBXML2_SOURCE_DIR}/HTMLtree.c" + "${LIBXML2_SOURCE_DIR}/debugXML.c" + "${LIBXML2_SOURCE_DIR}/xpath.c" + "${LIBXML2_SOURCE_DIR}/xpointer.c" + "${LIBXML2_SOURCE_DIR}/xinclude.c" + "${LIBXML2_SOURCE_DIR}/nanohttp.c" + "${LIBXML2_SOURCE_DIR}/nanoftp.c" + "${LIBXML2_SOURCE_DIR}/DOCBparser.c" + "${LIBXML2_SOURCE_DIR}/catalog.c" + "${LIBXML2_SOURCE_DIR}/globals.c" + "${LIBXML2_SOURCE_DIR}/threads.c" + "${LIBXML2_SOURCE_DIR}/c14n.c" + "${LIBXML2_SOURCE_DIR}/xmlstring.c" + "${LIBXML2_SOURCE_DIR}/buf.c" + "${LIBXML2_SOURCE_DIR}/xmlregexp.c" + "${LIBXML2_SOURCE_DIR}/xmlschemas.c" + "${LIBXML2_SOURCE_DIR}/xmlschemastypes.c" + "${LIBXML2_SOURCE_DIR}/xmlunicode.c" + "${LIBXML2_SOURCE_DIR}/triostr.c" + #"${LIBXML2_SOURCE_DIR}/trio.c" + "${LIBXML2_SOURCE_DIR}/xmlreader.c" + "${LIBXML2_SOURCE_DIR}/relaxng.c" + "${LIBXML2_SOURCE_DIR}/dict.c" + "${LIBXML2_SOURCE_DIR}/SAX2.c" + "${LIBXML2_SOURCE_DIR}/xmlwriter.c" + "${LIBXML2_SOURCE_DIR}/legacy.c" + "${LIBXML2_SOURCE_DIR}/chvalid.c" + "${LIBXML2_SOURCE_DIR}/pattern.c" + "${LIBXML2_SOURCE_DIR}/xmlsave.c" + "${LIBXML2_SOURCE_DIR}/xmlmodule.c" + "${LIBXML2_SOURCE_DIR}/schematron.c" + "${LIBXML2_SOURCE_DIR}/xzlib.c" ) add_library(libxml2 ${SRCS}) @@ -57,6 +57,6 @@ if(M_LIBRARY) target_link_libraries(libxml2 PRIVATE ${M_LIBRARY}) endif() -target_include_directories(libxml2 PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/include) -target_include_directories(libxml2 PUBLIC ${LIBXML2_SOURCE_DIR}/include) +target_include_directories(libxml2 PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/include") +target_include_directories(libxml2 PUBLIC "${LIBXML2_SOURCE_DIR}/include") target_include_directories(libxml2 SYSTEM BEFORE PRIVATE ${ZLIB_INCLUDE_DIR}) diff --git a/contrib/llvm b/contrib/llvm index 8f24d507c1c..e5751459412 160000 --- a/contrib/llvm +++ b/contrib/llvm @@ -1 +1 @@ -Subproject commit 8f24d507c1cfeec66d27f48fe74518fd278e2d25 +Subproject commit e5751459412bce1391fb7a2e9bbc01e131bf72f1 diff --git a/contrib/lz4-cmake/CMakeLists.txt b/contrib/lz4-cmake/CMakeLists.txt index 72510d72534..77e00d4295b 100644 --- a/contrib/lz4-cmake/CMakeLists.txt +++ b/contrib/lz4-cmake/CMakeLists.txt @@ -33,5 +33,5 @@ if (NOT EXTERNAL_LZ4_LIBRARY_FOUND) if (SANITIZE STREQUAL "undefined") target_compile_options (lz4 PRIVATE -fno-sanitize=undefined) endif () - target_include_directories(lz4 PUBLIC ${LIBRARY_DIR}/lib) + target_include_directories(lz4 PUBLIC "${LIBRARY_DIR}/lib") endif () diff --git a/contrib/mariadb-connector-c b/contrib/mariadb-connector-c index f4476ee7311..5f4034a3a63 160000 --- a/contrib/mariadb-connector-c +++ b/contrib/mariadb-connector-c @@ -1 +1 @@ -Subproject commit f4476ee7311b35b593750f6ae2cbdb62a4006374 +Subproject commit 5f4034a3a6376416504f17186c55fe401c6d8e5e diff --git a/contrib/nanodbc b/contrib/nanodbc new file mode 160000 index 00000000000..9fc45967551 --- /dev/null +++ b/contrib/nanodbc @@ -0,0 +1 @@ +Subproject commit 9fc459675515d491401727ec67fca38db721f28c diff --git a/contrib/nanodbc-cmake/CMakeLists.txt b/contrib/nanodbc-cmake/CMakeLists.txt new file mode 100644 index 00000000000..26a030c3995 --- /dev/null +++ b/contrib/nanodbc-cmake/CMakeLists.txt @@ -0,0 +1,18 @@ +if (NOT USE_INTERNAL_NANODBC_LIBRARY) + return () +endif () + +set (LIBRARY_DIR 
"${ClickHouse_SOURCE_DIR}/contrib/nanodbc") + +if (NOT TARGET unixodbc) + message(FATAL_ERROR "Configuration error: unixodbc is not a target") +endif() + +set (SRCS + "${LIBRARY_DIR}/nanodbc/nanodbc.cpp" +) + +add_library(nanodbc ${SRCS}) + +target_link_libraries (nanodbc PUBLIC unixodbc) +target_include_directories (nanodbc SYSTEM PUBLIC "${LIBRARY_DIR}/") diff --git a/contrib/nuraft-cmake/CMakeLists.txt b/contrib/nuraft-cmake/CMakeLists.txt index 83137fe73bf..725e86195e1 100644 --- a/contrib/nuraft-cmake/CMakeLists.txt +++ b/contrib/nuraft-cmake/CMakeLists.txt @@ -1,30 +1,30 @@ -set(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/NuRaft) +set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/NuRaft") set(SRCS - ${LIBRARY_DIR}/src/handle_priority.cxx - ${LIBRARY_DIR}/src/buffer_serializer.cxx - ${LIBRARY_DIR}/src/peer.cxx - ${LIBRARY_DIR}/src/global_mgr.cxx - ${LIBRARY_DIR}/src/buffer.cxx - ${LIBRARY_DIR}/src/asio_service.cxx - ${LIBRARY_DIR}/src/handle_client_request.cxx - ${LIBRARY_DIR}/src/raft_server.cxx - ${LIBRARY_DIR}/src/snapshot.cxx - ${LIBRARY_DIR}/src/handle_commit.cxx - ${LIBRARY_DIR}/src/error_code.cxx - ${LIBRARY_DIR}/src/crc32.cxx - ${LIBRARY_DIR}/src/handle_snapshot_sync.cxx - ${LIBRARY_DIR}/src/stat_mgr.cxx - ${LIBRARY_DIR}/src/handle_join_leave.cxx - ${LIBRARY_DIR}/src/handle_user_cmd.cxx - ${LIBRARY_DIR}/src/handle_custom_notification.cxx - ${LIBRARY_DIR}/src/handle_vote.cxx - ${LIBRARY_DIR}/src/launcher.cxx - ${LIBRARY_DIR}/src/srv_config.cxx - ${LIBRARY_DIR}/src/snapshot_sync_req.cxx - ${LIBRARY_DIR}/src/handle_timeout.cxx - ${LIBRARY_DIR}/src/handle_append_entries.cxx - ${LIBRARY_DIR}/src/cluster_config.cxx + "${LIBRARY_DIR}/src/handle_priority.cxx" + "${LIBRARY_DIR}/src/buffer_serializer.cxx" + "${LIBRARY_DIR}/src/peer.cxx" + "${LIBRARY_DIR}/src/global_mgr.cxx" + "${LIBRARY_DIR}/src/buffer.cxx" + "${LIBRARY_DIR}/src/asio_service.cxx" + "${LIBRARY_DIR}/src/handle_client_request.cxx" + "${LIBRARY_DIR}/src/raft_server.cxx" + "${LIBRARY_DIR}/src/snapshot.cxx" + "${LIBRARY_DIR}/src/handle_commit.cxx" + "${LIBRARY_DIR}/src/error_code.cxx" + "${LIBRARY_DIR}/src/crc32.cxx" + "${LIBRARY_DIR}/src/handle_snapshot_sync.cxx" + "${LIBRARY_DIR}/src/stat_mgr.cxx" + "${LIBRARY_DIR}/src/handle_join_leave.cxx" + "${LIBRARY_DIR}/src/handle_user_cmd.cxx" + "${LIBRARY_DIR}/src/handle_custom_notification.cxx" + "${LIBRARY_DIR}/src/handle_vote.cxx" + "${LIBRARY_DIR}/src/launcher.cxx" + "${LIBRARY_DIR}/src/srv_config.cxx" + "${LIBRARY_DIR}/src/snapshot_sync_req.cxx" + "${LIBRARY_DIR}/src/handle_timeout.cxx" + "${LIBRARY_DIR}/src/handle_append_entries.cxx" + "${LIBRARY_DIR}/src/cluster_config.cxx" ) @@ -37,9 +37,9 @@ else() target_compile_definitions(nuraft PRIVATE USE_BOOST_ASIO=1 BOOST_ASIO_STANDALONE=1) endif() -target_include_directories (nuraft SYSTEM PRIVATE ${LIBRARY_DIR}/include/libnuraft) +target_include_directories (nuraft SYSTEM PRIVATE "${LIBRARY_DIR}/include/libnuraft") # for some reason include "asio.h" directly without "boost/" prefix. 
-target_include_directories (nuraft SYSTEM PRIVATE ${ClickHouse_SOURCE_DIR}/contrib/boost/boost) +target_include_directories (nuraft SYSTEM PRIVATE "${ClickHouse_SOURCE_DIR}/contrib/boost/boost") target_link_libraries (nuraft PRIVATE boost::headers_only boost::coroutine) @@ -47,4 +47,4 @@ if(OPENSSL_SSL_LIBRARY AND OPENSSL_CRYPTO_LIBRARY) target_link_libraries (nuraft PRIVATE ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY}) endif() -target_include_directories (nuraft SYSTEM PUBLIC ${LIBRARY_DIR}/include) +target_include_directories (nuraft SYSTEM PUBLIC "${LIBRARY_DIR}/include") diff --git a/contrib/openldap-cmake/CMakeLists.txt b/contrib/openldap-cmake/CMakeLists.txt index b0a5f4048ff..0892403bb62 100644 --- a/contrib/openldap-cmake/CMakeLists.txt +++ b/contrib/openldap-cmake/CMakeLists.txt @@ -1,4 +1,4 @@ -set(OPENLDAP_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/openldap) +set(OPENLDAP_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/openldap") # How were these lists generated? # I compiled the original OpenLDAP with its original build system and copied the list of source files from build commands. @@ -12,9 +12,9 @@ set(OPENLDAP_VERSION_STRING "2.5.X") macro(mkversion _lib_name) add_custom_command( - OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${_lib_name}-version.c - COMMAND ${CMAKE_COMMAND} -E env bash -c "${OPENLDAP_SOURCE_DIR}/build/mkversion -v '${OPENLDAP_VERSION_STRING}' liblber.la > ${CMAKE_CURRENT_BINARY_DIR}/${_lib_name}-version.c" - MAIN_DEPENDENCY ${OPENLDAP_SOURCE_DIR}/build/mkversion + OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${_lib_name}-version.c" + COMMAND ${CMAKE_COMMAND} -E env bash -c "${OPENLDAP_SOURCE_DIR}/build/mkversion -v '${OPENLDAP_VERSION_STRING}' liblber.la > \"${CMAKE_CURRENT_BINARY_DIR}/${_lib_name}-version.c\"" + MAIN_DEPENDENCY "${OPENLDAP_SOURCE_DIR}/build/mkversion" WORKING_DIRECTORY ${OPENLDAP_SOURCE_DIR} VERBATIM ) @@ -37,23 +37,23 @@ endif() set(_extra_build_dir "${CMAKE_CURRENT_SOURCE_DIR}/${_system_name}_${_system_processor}") set(_lber_srcs - ${OPENLDAP_SOURCE_DIR}/libraries/liblber/assert.c - ${OPENLDAP_SOURCE_DIR}/libraries/liblber/decode.c - ${OPENLDAP_SOURCE_DIR}/libraries/liblber/encode.c - ${OPENLDAP_SOURCE_DIR}/libraries/liblber/io.c - ${OPENLDAP_SOURCE_DIR}/libraries/liblber/bprint.c - ${OPENLDAP_SOURCE_DIR}/libraries/liblber/debug.c - ${OPENLDAP_SOURCE_DIR}/libraries/liblber/memory.c - ${OPENLDAP_SOURCE_DIR}/libraries/liblber/options.c - ${OPENLDAP_SOURCE_DIR}/libraries/liblber/sockbuf.c - ${OPENLDAP_SOURCE_DIR}/libraries/liblber/stdio.c + "${OPENLDAP_SOURCE_DIR}/libraries/liblber/assert.c" + "${OPENLDAP_SOURCE_DIR}/libraries/liblber/decode.c" + "${OPENLDAP_SOURCE_DIR}/libraries/liblber/encode.c" + "${OPENLDAP_SOURCE_DIR}/libraries/liblber/io.c" + "${OPENLDAP_SOURCE_DIR}/libraries/liblber/bprint.c" + "${OPENLDAP_SOURCE_DIR}/libraries/liblber/debug.c" + "${OPENLDAP_SOURCE_DIR}/libraries/liblber/memory.c" + "${OPENLDAP_SOURCE_DIR}/libraries/liblber/options.c" + "${OPENLDAP_SOURCE_DIR}/libraries/liblber/sockbuf.c" + "${OPENLDAP_SOURCE_DIR}/libraries/liblber/stdio.c" ) mkversion(lber) add_library(lber ${_libs_type} ${_lber_srcs} - ${CMAKE_CURRENT_BINARY_DIR}/lber-version.c + "${CMAKE_CURRENT_BINARY_DIR}/lber-version.c" ) target_link_libraries(lber @@ -62,8 +62,8 @@ target_link_libraries(lber target_include_directories(lber PRIVATE ${_extra_build_dir}/include - PRIVATE ${OPENLDAP_SOURCE_DIR}/include - PRIVATE ${OPENLDAP_SOURCE_DIR}/libraries/liblber + PRIVATE "${OPENLDAP_SOURCE_DIR}/include" + PRIVATE "${OPENLDAP_SOURCE_DIR}/libraries/liblber" PRIVATE
${OPENSSL_INCLUDE_DIR} ) @@ -72,78 +72,78 @@ target_compile_definitions(lber ) set(_ldap_srcs - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/bind.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/open.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/result.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/error.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/compare.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/search.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/controls.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/messages.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/references.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/extended.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/cyrus.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/modify.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/add.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/modrdn.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/delete.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/abandon.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/sasl.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/sbind.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/unbind.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/cancel.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/filter.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/free.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/sort.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/passwd.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/whoami.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/vc.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/getdn.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/getentry.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/getattr.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/getvalues.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/addentry.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/request.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/os-ip.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/url.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/pagectrl.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/sortctrl.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/vlvctrl.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/init.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/options.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/print.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/string.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/util-int.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/schema.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/charray.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/os-local.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/dnssrv.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/utf-8.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/utf-8-conv.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/tls2.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/tls_o.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/tls_g.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/turn.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/ppolicy.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/dds.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/txn.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/ldap_sync.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/stctrl.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/assertion.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/deref.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/ldifutil.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/ldif.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/fetch.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/lbase64.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/msctrl.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/psearchctrl.c + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/bind.c" + 
"${OPENLDAP_SOURCE_DIR}/libraries/libldap/open.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/result.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/error.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/compare.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/search.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/controls.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/messages.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/references.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/extended.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/cyrus.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/modify.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/add.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/modrdn.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/delete.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/abandon.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/sasl.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/sbind.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/unbind.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/cancel.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/filter.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/free.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/sort.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/passwd.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/whoami.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/vc.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/getdn.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/getentry.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/getattr.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/getvalues.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/addentry.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/request.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/os-ip.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/url.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/pagectrl.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/sortctrl.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/vlvctrl.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/init.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/options.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/print.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/string.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/util-int.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/schema.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/charray.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/os-local.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/dnssrv.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/utf-8.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/utf-8-conv.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/tls2.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/tls_o.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/tls_g.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/turn.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/ppolicy.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/dds.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/txn.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/ldap_sync.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/stctrl.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/assertion.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/deref.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/ldifutil.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/ldif.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/fetch.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/lbase64.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/msctrl.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/psearchctrl.c" ) mkversion(ldap) add_library(ldap ${_libs_type} ${_ldap_srcs} - 
${CMAKE_CURRENT_BINARY_DIR}/ldap-version.c + "${CMAKE_CURRENT_BINARY_DIR}/ldap-version.c" ) target_link_libraries(ldap @@ -153,8 +153,8 @@ target_link_libraries(ldap target_include_directories(ldap PRIVATE ${_extra_build_dir}/include - PRIVATE ${OPENLDAP_SOURCE_DIR}/include - PRIVATE ${OPENLDAP_SOURCE_DIR}/libraries/libldap + PRIVATE "${OPENLDAP_SOURCE_DIR}/include" + PRIVATE "${OPENLDAP_SOURCE_DIR}/libraries/libldap" PRIVATE ${OPENSSL_INCLUDE_DIR} ) @@ -163,16 +163,16 @@ target_compile_definitions(ldap ) set(_ldap_r_specific_srcs - ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/threads.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/rdwr.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/tpool.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/rq.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_posix.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_thr.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_nt.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_pth.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_stub.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_debug.c + "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/threads.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/rdwr.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/tpool.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/rq.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_posix.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_thr.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_nt.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_pth.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_stub.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_debug.c" ) mkversion(ldap_r) @@ -180,7 +180,7 @@ mkversion(ldap_r) add_library(ldap_r ${_libs_type} ${_ldap_r_specific_srcs} ${_ldap_srcs} - ${CMAKE_CURRENT_BINARY_DIR}/ldap_r-version.c + "${CMAKE_CURRENT_BINARY_DIR}/ldap_r-version.c" ) target_link_libraries(ldap_r @@ -190,9 +190,9 @@ target_link_libraries(ldap_r target_include_directories(ldap_r PRIVATE ${_extra_build_dir}/include - PRIVATE ${OPENLDAP_SOURCE_DIR}/include - PRIVATE ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r - PRIVATE ${OPENLDAP_SOURCE_DIR}/libraries/libldap + PRIVATE "${OPENLDAP_SOURCE_DIR}/include" + PRIVATE "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r" + PRIVATE "${OPENLDAP_SOURCE_DIR}/libraries/libldap" PRIVATE ${OPENSSL_INCLUDE_DIR} ) diff --git a/contrib/openldap-cmake/darwin_aarch64/include/lber_types.h b/contrib/openldap-cmake/darwin_aarch64/include/lber_types.h new file mode 100644 index 00000000000..dbd59430527 --- /dev/null +++ b/contrib/openldap-cmake/darwin_aarch64/include/lber_types.h @@ -0,0 +1,63 @@ +/* include/lber_types.h. Generated from lber_types.hin by configure. */ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software . + * + * Copyright 1998-2020 The OpenLDAP Foundation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . 
+ */ + +/* + * LBER types + */ + +#ifndef _LBER_TYPES_H +#define _LBER_TYPES_H + +#include <ldap_cdefs.h> + +LDAP_BEGIN_DECL + +/* LBER boolean, enum, integers (32 bits or larger) */ +#define LBER_INT_T int + +/* LBER tags (32 bits or larger) */ +#define LBER_TAG_T long + +/* LBER socket descriptor */ +#define LBER_SOCKET_T int + +/* LBER lengths (32 bits or larger) */ +#define LBER_LEN_T long + +/* ------------------------------------------------------------ */ + +/* booleans, enumerations, and integers */ +typedef LBER_INT_T ber_int_t; + +/* signed and unsigned versions */ +typedef signed LBER_INT_T ber_sint_t; +typedef unsigned LBER_INT_T ber_uint_t; + +/* tags */ +typedef unsigned LBER_TAG_T ber_tag_t; + +/* "socket" descriptors */ +typedef LBER_SOCKET_T ber_socket_t; + +/* lengths */ +typedef unsigned LBER_LEN_T ber_len_t; + +/* signed lengths */ +typedef signed LBER_LEN_T ber_slen_t; + +LDAP_END_DECL + +#endif /* _LBER_TYPES_H */ diff --git a/contrib/openldap-cmake/darwin_aarch64/include/ldap_config.h b/contrib/openldap-cmake/darwin_aarch64/include/ldap_config.h new file mode 100644 index 00000000000..89f7b40b884 --- /dev/null +++ b/contrib/openldap-cmake/darwin_aarch64/include/ldap_config.h @@ -0,0 +1,74 @@ +/* include/ldap_config.h. Generated from ldap_config.hin by configure. */ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software <http://www.openldap.org/>. + * + * Copyright 1998-2020 The OpenLDAP Foundation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * <http://www.OpenLDAP.org/license.html>. + */ + +/* + * This file works in conjunction with OpenLDAP configure system. + * If you do not like the values below, adjust your configure options. + */ + +#ifndef _LDAP_CONFIG_H +#define _LDAP_CONFIG_H + +/* directory separator */ +#ifndef LDAP_DIRSEP +#ifndef _WIN32 +#define LDAP_DIRSEP "/" +#else +#define LDAP_DIRSEP "\\" +#endif +#endif + +/* directory for temporary files */ +#if defined(_WIN32) +# define LDAP_TMPDIR "C:\\." /* we don't have much of a choice */ +#elif defined( _P_tmpdir ) +# define LDAP_TMPDIR _P_tmpdir +#elif defined( P_tmpdir ) +# define LDAP_TMPDIR P_tmpdir +#elif defined( _PATH_TMPDIR ) +# define LDAP_TMPDIR _PATH_TMPDIR +#else +# define LDAP_TMPDIR LDAP_DIRSEP "tmp" +#endif + +/* directories */ +#ifndef LDAP_BINDIR +#define LDAP_BINDIR "/tmp/ldap-prefix/bin" +#endif +#ifndef LDAP_SBINDIR +#define LDAP_SBINDIR "/tmp/ldap-prefix/sbin" +#endif +#ifndef LDAP_DATADIR +#define LDAP_DATADIR "/tmp/ldap-prefix/share/openldap" +#endif +#ifndef LDAP_SYSCONFDIR +#define LDAP_SYSCONFDIR "/tmp/ldap-prefix/etc/openldap" +#endif +#ifndef LDAP_LIBEXECDIR +#define LDAP_LIBEXECDIR "/tmp/ldap-prefix/libexec" +#endif +#ifndef LDAP_MODULEDIR +#define LDAP_MODULEDIR "/tmp/ldap-prefix/libexec/openldap" +#endif +#ifndef LDAP_RUNDIR +#define LDAP_RUNDIR "/tmp/ldap-prefix/var" +#endif +#ifndef LDAP_LOCALEDIR +#define LDAP_LOCALEDIR "" +#endif + + +#endif /* _LDAP_CONFIG_H */ diff --git a/contrib/openldap-cmake/darwin_aarch64/include/ldap_features.h b/contrib/openldap-cmake/darwin_aarch64/include/ldap_features.h new file mode 100644 index 00000000000..f0cc7c3626f --- /dev/null +++ b/contrib/openldap-cmake/darwin_aarch64/include/ldap_features.h @@ -0,0 +1,61 @@ +/* include/ldap_features.h. Generated from ldap_features.hin by configure.
*/ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software . + * + * Copyright 1998-2020 The OpenLDAP Foundation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +/* + * LDAP Features + */ + +#ifndef _LDAP_FEATURES_H +#define _LDAP_FEATURES_H 1 + +/* OpenLDAP API version macros */ +#define LDAP_VENDOR_VERSION 20501 +#define LDAP_VENDOR_VERSION_MAJOR 2 +#define LDAP_VENDOR_VERSION_MINOR 5 +#define LDAP_VENDOR_VERSION_PATCH X + +/* +** WORK IN PROGRESS! +** +** OpenLDAP reentrancy/thread-safeness should be dynamically +** checked using ldap_get_option(). +** +** The -lldap implementation is not thread-safe. +** +** The -lldap_r implementation is: +** LDAP_API_FEATURE_THREAD_SAFE (basic thread safety) +** but also be: +** LDAP_API_FEATURE_SESSION_THREAD_SAFE +** LDAP_API_FEATURE_OPERATION_THREAD_SAFE +** +** The preprocessor flag LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE +** can be used to determine if -lldap_r is available at compile +** time. You must define LDAP_THREAD_SAFE if and only if you +** link with -lldap_r. +** +** If you fail to define LDAP_THREAD_SAFE when linking with +** -lldap_r or define LDAP_THREAD_SAFE when linking with -lldap, +** provided header definitions and declarations may be incorrect. +** +*/ + +/* is -lldap_r available or not */ +#define LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE 1 + +/* LDAP v2 Referrals */ +/* #undef LDAP_API_FEATURE_X_OPENLDAP_V2_REFERRALS */ + +#endif /* LDAP_FEATURES */ diff --git a/contrib/openldap-cmake/darwin_aarch64/include/portable.h b/contrib/openldap-cmake/darwin_aarch64/include/portable.h new file mode 100644 index 00000000000..fdf4e89017e --- /dev/null +++ b/contrib/openldap-cmake/darwin_aarch64/include/portable.h @@ -0,0 +1,1169 @@ +/* include/portable.h. Generated from portable.hin by configure. */ +/* include/portable.hin. Generated from configure.in by autoheader. */ + + +/* begin of portable.h.pre */ +/* This work is part of OpenLDAP Software . + * + * Copyright 1998-2020 The OpenLDAP Foundation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . 
+ */ + +#ifndef _LDAP_PORTABLE_H +#define _LDAP_PORTABLE_H + +/* define this if needed to get reentrant functions */ +#ifndef REENTRANT +#define REENTRANT 1 +#endif +#ifndef _REENTRANT +#define _REENTRANT 1 +#endif + +/* define this if needed to get threadsafe functions */ +#ifndef THREADSAFE +#define THREADSAFE 1 +#endif +#ifndef _THREADSAFE +#define _THREADSAFE 1 +#endif +#ifndef THREAD_SAFE +#define THREAD_SAFE 1 +#endif +#ifndef _THREAD_SAFE +#define _THREAD_SAFE 1 +#endif + +#ifndef _SGI_MP_SOURCE +#define _SGI_MP_SOURCE 1 +#endif + +/* end of portable.h.pre */ + + +/* Define if building universal (internal helper macro) */ +/* #undef AC_APPLE_UNIVERSAL_BUILD */ + +/* define to use both and */ +/* #undef BOTH_STRINGS_H */ + +/* define if cross compiling */ +/* #undef CROSS_COMPILING */ + +/* set to the number of arguments ctime_r() expects */ +#define CTIME_R_NARGS 2 + +/* define if toupper() requires islower() */ +/* #undef C_UPPER_LOWER */ + +/* define if sys_errlist is not declared in stdio.h or errno.h */ +/* #undef DECL_SYS_ERRLIST */ + +/* define to enable slapi library */ +/* #undef ENABLE_SLAPI */ + +/* defined to be the EXE extension */ +#define EXEEXT "" + +/* set to the number of arguments gethostbyaddr_r() expects */ +/* #undef GETHOSTBYADDR_R_NARGS */ + +/* set to the number of arguments gethostbyname_r() expects */ +/* #undef GETHOSTBYNAME_R_NARGS */ + +/* Define to 1 if `TIOCGWINSZ' requires . */ +/* #undef GWINSZ_IN_SYS_IOCTL */ + +/* define if you have AIX security lib */ +/* #undef HAVE_AIX_SECURITY */ + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_INET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_NAMESER_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ASSERT_H 1 + +/* Define to 1 if you have the `bcopy' function. */ +#define HAVE_BCOPY 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_BITS_TYPES_H */ + +/* Define to 1 if you have the `chroot' function. */ +#define HAVE_CHROOT 1 + +/* Define to 1 if you have the `closesocket' function. */ +/* #undef HAVE_CLOSESOCKET */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_CONIO_H */ + +/* define if crypt(3) is available */ +/* #undef HAVE_CRYPT */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_CRYPT_H */ + +/* define if crypt_r() is also available */ +/* #undef HAVE_CRYPT_R */ + +/* Define to 1 if you have the `ctime_r' function. */ +#define HAVE_CTIME_R 1 + +/* define if you have Cyrus SASL */ +/* #undef HAVE_CYRUS_SASL */ + +/* define if your system supports /dev/poll */ +/* #undef HAVE_DEVPOLL */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DIRECT_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +#define HAVE_DIRENT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */ +/* #undef HAVE_DOPRNT */ + +/* define if system uses EBCDIC instead of ASCII */ +/* #undef HAVE_EBCDIC */ + +/* Define to 1 if you have the `endgrent' function. */ +#define HAVE_ENDGRENT 1 + +/* Define to 1 if you have the `endpwent' function. */ +#define HAVE_ENDPWENT 1 + +/* define if your system supports epoll */ +/* #undef HAVE_EPOLL */ + +/* Define to 1 if you have the header file. */ +#define HAVE_ERRNO_H 1 + +/* Define to 1 if you have the `fcntl' function. */ +#define HAVE_FCNTL 1 + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_FCNTL_H 1 + +/* define if you actually have FreeBSD fetch(3) */ +/* #undef HAVE_FETCH */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_FILIO_H */ + +/* Define to 1 if you have the `flock' function. */ +#define HAVE_FLOCK 1 + +/* Define to 1 if you have the `fstat' function. */ +#define HAVE_FSTAT 1 + +/* Define to 1 if you have the `gai_strerror' function. */ +#define HAVE_GAI_STRERROR 1 + +/* Define to 1 if you have the `getaddrinfo' function. */ +#define HAVE_GETADDRINFO 1 + +/* Define to 1 if you have the `getdtablesize' function. */ +#define HAVE_GETDTABLESIZE 1 + +/* Define to 1 if you have the `geteuid' function. */ +#define HAVE_GETEUID 1 + +/* Define to 1 if you have the `getgrgid' function. */ +#define HAVE_GETGRGID 1 + +/* Define to 1 if you have the `gethostbyaddr_r' function. */ +/* #undef HAVE_GETHOSTBYADDR_R */ + +/* Define to 1 if you have the `gethostbyname_r' function. */ +/* #undef HAVE_GETHOSTBYNAME_R */ + +/* Define to 1 if you have the `gethostname' function. */ +#define HAVE_GETHOSTNAME 1 + +/* Define to 1 if you have the `getnameinfo' function. */ +#define HAVE_GETNAMEINFO 1 + +/* Define to 1 if you have the `getopt' function. */ +#define HAVE_GETOPT 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_GETOPT_H 1 + +/* Define to 1 if you have the `getpassphrase' function. */ +/* #undef HAVE_GETPASSPHRASE */ + +/* Define to 1 if you have the `getpeereid' function. */ +#define HAVE_GETPEEREID 1 + +/* Define to 1 if you have the `getpeerucred' function. */ +/* #undef HAVE_GETPEERUCRED */ + +/* Define to 1 if you have the `getpwnam' function. */ +#define HAVE_GETPWNAM 1 + +/* Define to 1 if you have the `getpwuid' function. */ +#define HAVE_GETPWUID 1 + +/* Define to 1 if you have the `getspnam' function. */ +/* #undef HAVE_GETSPNAM */ + +/* Define to 1 if you have the `gettimeofday' function. */ +#define HAVE_GETTIMEOFDAY 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_GMP_H */ + +/* Define to 1 if you have the `gmtime_r' function. */ +#define HAVE_GMTIME_R 1 + +/* define if you have GNUtls */ +/* #undef HAVE_GNUTLS */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_GNUTLS_GNUTLS_H */ + +/* if you have GNU Pth */ +/* #undef HAVE_GNU_PTH */ + +/* Define to 1 if you have the header file. */ +#define HAVE_GRP_H 1 + +/* Define to 1 if you have the `hstrerror' function. */ +#define HAVE_HSTRERROR 1 + +/* define to you inet_aton(3) is available */ +#define HAVE_INET_ATON 1 + +/* Define to 1 if you have the `inet_ntoa_b' function. */ +/* #undef HAVE_INET_NTOA_B */ + +/* Define to 1 if you have the `inet_ntop' function. */ +#define HAVE_INET_NTOP 1 + +/* Define to 1 if you have the `initgroups' function. */ +#define HAVE_INITGROUPS 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define to 1 if you have the `ioctl' function. */ +#define HAVE_IOCTL 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_IO_H */ + +/* define if your system supports kqueue */ +#define HAVE_KQUEUE 1 + +/* Define to 1 if you have the `gen' library (-lgen). */ +/* #undef HAVE_LIBGEN */ + +/* Define to 1 if you have the `gmp' library (-lgmp). */ +/* #undef HAVE_LIBGMP */ + +/* Define to 1 if you have the `inet' library (-linet). */ +/* #undef HAVE_LIBINET */ + +/* define if you have libtool -ltdl */ +/* #undef HAVE_LIBLTDL */ + +/* Define to 1 if you have the `net' library (-lnet). */ +/* #undef HAVE_LIBNET */ + +/* Define to 1 if you have the `nsl' library (-lnsl). 
*/ +/* #undef HAVE_LIBNSL */ + +/* Define to 1 if you have the `nsl_s' library (-lnsl_s). */ +/* #undef HAVE_LIBNSL_S */ + +/* Define to 1 if you have the `socket' library (-lsocket). */ +/* #undef HAVE_LIBSOCKET */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_LIBUTIL_H */ + +/* Define to 1 if you have the `V3' library (-lV3). */ +/* #undef HAVE_LIBV3 */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LIMITS_H 1 + +/* if you have LinuxThreads */ +/* #undef HAVE_LINUX_THREADS */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LOCALE_H 1 + +/* Define to 1 if you have the `localtime_r' function. */ +#define HAVE_LOCALTIME_R 1 + +/* Define to 1 if you have the `lockf' function. */ +#define HAVE_LOCKF 1 + +/* Define to 1 if the system has the type `long long'. */ +#define HAVE_LONG_LONG 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_LTDL_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_MALLOC_H */ + +/* Define to 1 if you have the `memcpy' function. */ +#define HAVE_MEMCPY 1 + +/* Define to 1 if you have the `memmove' function. */ +#define HAVE_MEMMOVE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `memrchr' function. */ +/* #undef HAVE_MEMRCHR */ + +/* Define to 1 if you have the `mkstemp' function. */ +#define HAVE_MKSTEMP 1 + +/* Define to 1 if you have the `mktemp' function. */ +#define HAVE_MKTEMP 1 + +/* define this if you have mkversion */ +#define HAVE_MKVERSION 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. */ +/* #undef HAVE_NDIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_NETINET_TCP_H 1 + +/* define if strerror_r returns char* instead of int */ +/* #undef HAVE_NONPOSIX_STRERROR_R */ + +/* if you have NT Event Log */ +/* #undef HAVE_NT_EVENT_LOG */ + +/* if you have NT Service Manager */ +/* #undef HAVE_NT_SERVICE_MANAGER */ + +/* if you have NT Threads */ +/* #undef HAVE_NT_THREADS */ + +/* define if you have OpenSSL */ +#define HAVE_OPENSSL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_BN_H 1 + +/* define if you have OpenSSL with CRL checking capability */ +#define HAVE_OPENSSL_CRL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_CRYPTO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_SSL_H 1 + +/* Define to 1 if you have the `pipe' function. */ +#define HAVE_PIPE 1 + +/* Define to 1 if you have the `poll' function. */ +#define HAVE_POLL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_POLL_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PROCESS_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PSAP_H */ + +/* define to pthreads API spec revision */ +#define HAVE_PTHREADS 10 + +/* define if you have pthread_detach function */ +#define HAVE_PTHREAD_DETACH 1 + +/* Define to 1 if you have the `pthread_getconcurrency' function. */ +#define HAVE_PTHREAD_GETCONCURRENCY 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_PTHREAD_H 1 + +/* Define to 1 if you have the `pthread_kill' function. */ +#define HAVE_PTHREAD_KILL 1 + +/* Define to 1 if you have the `pthread_kill_other_threads_np' function. */ +/* #undef HAVE_PTHREAD_KILL_OTHER_THREADS_NP */ + +/* define if you have pthread_rwlock_destroy function */ +#define HAVE_PTHREAD_RWLOCK_DESTROY 1 + +/* Define to 1 if you have the `pthread_setconcurrency' function. 
*/ +#define HAVE_PTHREAD_SETCONCURRENCY 1 + +/* Define to 1 if you have the `pthread_yield' function. */ +/* #undef HAVE_PTHREAD_YIELD */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PTH_H */ + +/* Define to 1 if the system has the type `ptrdiff_t'. */ +#define HAVE_PTRDIFF_T 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_PWD_H 1 + +/* Define to 1 if you have the `read' function. */ +#define HAVE_READ 1 + +/* Define to 1 if you have the `recv' function. */ +#define HAVE_RECV 1 + +/* Define to 1 if you have the `recvfrom' function. */ +#define HAVE_RECVFROM 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_REGEX_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_RESOLV_H */ + +/* define if you have res_query() */ +/* #undef HAVE_RES_QUERY */ + +/* define if OpenSSL needs RSAref */ +/* #undef HAVE_RSAREF */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SASL_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SASL_SASL_H */ + +/* define if your SASL library has sasl_version() */ +/* #undef HAVE_SASL_VERSION */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SCHED_H 1 + +/* Define to 1 if you have the `sched_yield' function. */ +#define HAVE_SCHED_YIELD 1 + +/* Define to 1 if you have the `send' function. */ +#define HAVE_SEND 1 + +/* Define to 1 if you have the `sendmsg' function. */ +#define HAVE_SENDMSG 1 + +/* Define to 1 if you have the `sendto' function. */ +#define HAVE_SENDTO 1 + +/* Define to 1 if you have the `setegid' function. */ +#define HAVE_SETEGID 1 + +/* Define to 1 if you have the `seteuid' function. */ +#define HAVE_SETEUID 1 + +/* Define to 1 if you have the `setgid' function. */ +#define HAVE_SETGID 1 + +/* Define to 1 if you have the `setpwfile' function. */ +/* #undef HAVE_SETPWFILE */ + +/* Define to 1 if you have the `setsid' function. */ +#define HAVE_SETSID 1 + +/* Define to 1 if you have the `setuid' function. */ +#define HAVE_SETUID 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SGTTY_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SHADOW_H */ + +/* Define to 1 if you have the `sigaction' function. */ +#define HAVE_SIGACTION 1 + +/* Define to 1 if you have the `signal' function. */ +#define HAVE_SIGNAL 1 + +/* Define to 1 if you have the `sigset' function. */ +#define HAVE_SIGSET 1 + +/* define if you have -lslp */ +/* #undef HAVE_SLP */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SLP_H */ + +/* Define to 1 if you have the `snprintf' function. */ +#define HAVE_SNPRINTF 1 + +/* if you have spawnlp() */ +/* #undef HAVE_SPAWNLP */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SQLEXT_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SQL_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_STDDEF_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the `strdup' function. */ +#define HAVE_STRDUP 1 + +/* Define to 1 if you have the `strerror' function. */ +#define HAVE_STRERROR 1 + +/* Define to 1 if you have the `strerror_r' function. */ +#define HAVE_STRERROR_R 1 + +/* Define to 1 if you have the `strftime' function. */ +#define HAVE_STRFTIME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the `strpbrk' function. */ +#define HAVE_STRPBRK 1 + +/* Define to 1 if you have the `strrchr' function. */ +#define HAVE_STRRCHR 1 + +/* Define to 1 if you have the `strsep' function. */ +#define HAVE_STRSEP 1 + +/* Define to 1 if you have the `strspn' function. */ +#define HAVE_STRSPN 1 + +/* Define to 1 if you have the `strstr' function. */ +#define HAVE_STRSTR 1 + +/* Define to 1 if you have the `strtol' function. */ +#define HAVE_STRTOL 1 + +/* Define to 1 if you have the `strtoll' function. */ +#define HAVE_STRTOLL 1 + +/* Define to 1 if you have the `strtoq' function. */ +#define HAVE_STRTOQ 1 + +/* Define to 1 if you have the `strtoul' function. */ +#define HAVE_STRTOUL 1 + +/* Define to 1 if you have the `strtoull' function. */ +#define HAVE_STRTOULL 1 + +/* Define to 1 if you have the `strtouq' function. */ +#define HAVE_STRTOUQ 1 + +/* Define to 1 if `msg_accrightslen' is a member of `struct msghdr'. */ +/* #undef HAVE_STRUCT_MSGHDR_MSG_ACCRIGHTSLEN */ + +/* Define to 1 if `msg_control' is a member of `struct msghdr'. */ +/* #undef HAVE_STRUCT_MSGHDR_MSG_CONTROL */ + +/* Define to 1 if `pw_gecos' is a member of `struct passwd'. */ +#define HAVE_STRUCT_PASSWD_PW_GECOS 1 + +/* Define to 1 if `pw_passwd' is a member of `struct passwd'. */ +#define HAVE_STRUCT_PASSWD_PW_PASSWD 1 + +/* Define to 1 if `st_blksize' is a member of `struct stat'. */ +#define HAVE_STRUCT_STAT_ST_BLKSIZE 1 + +/* Define to 1 if `st_fstype' is a member of `struct stat'. */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE */ + +/* define to 1 if st_fstype is char * */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE_CHAR */ + +/* define to 1 if st_fstype is int */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE_INT */ + +/* Define to 1 if `st_vfstype' is a member of `struct stat'. */ +/* #undef HAVE_STRUCT_STAT_ST_VFSTYPE */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYNCH_H */ + +/* Define to 1 if you have the `sysconf' function. */ +#define HAVE_SYSCONF 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYSEXITS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYSLOG_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_DEVPOLL_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_DIR_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_EPOLL_H */ + +/* define if you actually have sys_errlist in your libs */ +#define HAVE_SYS_ERRLIST 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_ERRNO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_EVENT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_FILE_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_FILIO_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_FSTYP_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_IOCTL_H 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_NDIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_PARAM_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_POLL_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_PRIVGRP_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_RESOURCE_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SELECT_H 1 + +/* Define to 1 if you have the header file. 
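
   Both the C99 names (strtoll/strtoull) and the older BSD spellings
   (strtoq/strtouq) are probed above. A hedged sketch of the usual
   preference order (s, end and v are hypothetical locals):

       #ifdef HAVE_STRTOLL
           v = strtoll(s, &end, 10);
       #elif defined(HAVE_STRTOQ)
           v = strtoq(s, &end, 10);   // pre-C99 BSD spelling of the same call
       #endif
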
*/ +#define HAVE_SYS_SOCKET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SYSLOG_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UCRED_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UIO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UN_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_UUID_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_VMOUNT_H */ + +/* Define to 1 if you have that is POSIX.1 compatible. */ +#define HAVE_SYS_WAIT_H 1 + +/* define if you have -lwrap */ +/* #undef HAVE_TCPD */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_TCPD_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_TERMIOS_H 1 + +/* if you have Solaris LWP (thr) package */ +/* #undef HAVE_THR */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_THREAD_H */ + +/* Define to 1 if you have the `thr_getconcurrency' function. */ +/* #undef HAVE_THR_GETCONCURRENCY */ + +/* Define to 1 if you have the `thr_setconcurrency' function. */ +/* #undef HAVE_THR_SETCONCURRENCY */ + +/* Define to 1 if you have the `thr_yield' function. */ +/* #undef HAVE_THR_YIELD */ + +/* define if you have TLS */ +#define HAVE_TLS 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UTIME_H 1 + +/* define if you have uuid_generate() */ +/* #undef HAVE_UUID_GENERATE */ + +/* define if you have uuid_to_str() */ +/* #undef HAVE_UUID_TO_STR */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_UUID_UUID_H */ + +/* Define to 1 if you have the `vprintf' function. */ +#define HAVE_VPRINTF 1 + +/* Define to 1 if you have the `vsnprintf' function. */ +#define HAVE_VSNPRINTF 1 + +/* Define to 1 if you have the `wait4' function. */ +#define HAVE_WAIT4 1 + +/* Define to 1 if you have the `waitpid' function. */ +#define HAVE_WAITPID 1 + +/* define if you have winsock */ +/* #undef HAVE_WINSOCK */ + +/* define if you have winsock2 */ +/* #undef HAVE_WINSOCK2 */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WINSOCK2_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WINSOCK_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WIREDTIGER_H */ + +/* Define to 1 if you have the `write' function. */ +#define HAVE_WRITE 1 + +/* define if select implicitly yields */ +#define HAVE_YIELDING_SELECT 1 + +/* Define to 1 if you have the `_vsnprintf' function. 
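
   HAVE_VSNPRINTF and HAVE__VSNPRINTF cover the portable name and the
   leading-underscore spelling shipped by older MSVC runtimes. A sketch of
   the usual shim (buf, fmt and ap are hypothetical; error handling elided):

       #ifdef HAVE_VSNPRINTF
           n = vsnprintf(buf, sizeof(buf), fmt, ap);
       #elif defined(HAVE__VSNPRINTF)
           n = _vsnprintf(buf, sizeof(buf), fmt, ap);   // older MSVC spelling
       #endif
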
*/ +/* #undef HAVE__VSNPRINTF */ + +/* define to 32-bit or greater integer type */ +#define LBER_INT_T int + +/* define to large integer type */ +#define LBER_LEN_T long + +/* define to socket descriptor type */ +#define LBER_SOCKET_T int + +/* define to large integer type */ +#define LBER_TAG_T long + +/* define to 1 if library is thread safe */ +#define LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE 1 + +/* define to LDAP VENDOR VERSION */ +/* #undef LDAP_API_FEATURE_X_OPENLDAP_V2_REFERRALS */ + +/* define this to add debugging code */ +/* #undef LDAP_DEBUG */ + +/* define if LDAP libs are dynamic */ +/* #undef LDAP_LIBS_DYNAMIC */ + +/* define to support PF_INET6 */ +#define LDAP_PF_INET6 1 + +/* define to support PF_LOCAL */ +#define LDAP_PF_LOCAL 1 + +/* define this to add SLAPI code */ +/* #undef LDAP_SLAPI */ + +/* define this to add syslog code */ +/* #undef LDAP_SYSLOG */ + +/* Version */ +#define LDAP_VENDOR_VERSION 20501 + +/* Major */ +#define LDAP_VENDOR_VERSION_MAJOR 2 + +/* Minor */ +#define LDAP_VENDOR_VERSION_MINOR 5 + +/* Patch */ +#define LDAP_VENDOR_VERSION_PATCH X + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* define if memcmp is not 8-bit clean or is otherwise broken */ +/* #undef NEED_MEMCMP_REPLACEMENT */ + +/* define if you have (or want) no threads */ +/* #undef NO_THREADS */ + +/* define to use the original debug style */ +/* #undef OLD_DEBUG */ + +/* Package */ +#define OPENLDAP_PACKAGE "OpenLDAP" + +/* Version */ +#define OPENLDAP_VERSION "2.5.X" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "" + +/* define if sched_yield yields the entire process */ +/* #undef REPLACE_BROKEN_YIELD */ + +/* Define as the return type of signal handlers (`int' or `void'). */ +#define RETSIGTYPE void + +/* Define to the type of arg 1 for `select'. */ +#define SELECT_TYPE_ARG1 int + +/* Define to the type of args 2, 3 and 4 for `select'. */ +#define SELECT_TYPE_ARG234 (fd_set *) + +/* Define to the type of arg 5 for `select'. */ +#define SELECT_TYPE_ARG5 (struct timeval *) + +/* The size of `int', as computed by sizeof. */ +#define SIZEOF_INT 4 + +/* The size of `long', as computed by sizeof. */ +#define SIZEOF_LONG 8 + +/* The size of `long long', as computed by sizeof. */ +#define SIZEOF_LONG_LONG 8 + +/* The size of `short', as computed by sizeof. */ +#define SIZEOF_SHORT 2 + +/* The size of `wchar_t', as computed by sizeof. 
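
   Just above, RETSIGTYPE and the SELECT_TYPE_ARG* macros exist so that one
   call site compiles against the several historical signal(2) and select(2)
   prototypes. A hedged sketch of the classic autoconf consumption pattern
   (nfds, rfds and tv are hypothetical locals, not from this patch):

       RETSIGTYPE on_alarm(int sig);   // RETSIGTYPE expands to `void' here

       rc = select(nfds,
                   SELECT_TYPE_ARG234 &rfds,   // expands to the (fd_set *) cast
                   SELECT_TYPE_ARG234 NULL,
                   SELECT_TYPE_ARG234 NULL,
                   SELECT_TYPE_ARG5 &tv);      // (struct timeval *) cast
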
*/ +#define SIZEOF_WCHAR_T 4 + +/* define to support per-object ACIs */ +/* #undef SLAPD_ACI_ENABLED */ + +/* define to support LDAP Async Metadirectory backend */ +/* #undef SLAPD_ASYNCMETA */ + +/* define to support cleartext passwords */ +/* #undef SLAPD_CLEARTEXT */ + +/* define to support crypt(3) passwords */ +/* #undef SLAPD_CRYPT */ + +/* define to support DNS SRV backend */ +/* #undef SLAPD_DNSSRV */ + +/* define to support LDAP backend */ +/* #undef SLAPD_LDAP */ + +/* define to support MDB backend */ +/* #undef SLAPD_MDB */ + +/* define to support LDAP Metadirectory backend */ +/* #undef SLAPD_META */ + +/* define to support modules */ +/* #undef SLAPD_MODULES */ + +/* dynamically linked module */ +#define SLAPD_MOD_DYNAMIC 2 + +/* statically linked module */ +#define SLAPD_MOD_STATIC 1 + +/* define to support cn=Monitor backend */ +/* #undef SLAPD_MONITOR */ + +/* define to support NDB backend */ +/* #undef SLAPD_NDB */ + +/* define to support NULL backend */ +/* #undef SLAPD_NULL */ + +/* define for In-Directory Access Logging overlay */ +/* #undef SLAPD_OVER_ACCESSLOG */ + +/* define for Audit Logging overlay */ +/* #undef SLAPD_OVER_AUDITLOG */ + +/* define for Automatic Certificate Authority overlay */ +/* #undef SLAPD_OVER_AUTOCA */ + +/* define for Collect overlay */ +/* #undef SLAPD_OVER_COLLECT */ + +/* define for Attribute Constraint overlay */ +/* #undef SLAPD_OVER_CONSTRAINT */ + +/* define for Dynamic Directory Services overlay */ +/* #undef SLAPD_OVER_DDS */ + +/* define for Dynamic Directory Services overlay */ +/* #undef SLAPD_OVER_DEREF */ + +/* define for Dynamic Group overlay */ +/* #undef SLAPD_OVER_DYNGROUP */ + +/* define for Dynamic List overlay */ +/* #undef SLAPD_OVER_DYNLIST */ + +/* define for Reverse Group Membership overlay */ +/* #undef SLAPD_OVER_MEMBEROF */ + +/* define for Password Policy overlay */ +/* #undef SLAPD_OVER_PPOLICY */ + +/* define for Proxy Cache overlay */ +/* #undef SLAPD_OVER_PROXYCACHE */ + +/* define for Referential Integrity overlay */ +/* #undef SLAPD_OVER_REFINT */ + +/* define for Return Code overlay */ +/* #undef SLAPD_OVER_RETCODE */ + +/* define for Rewrite/Remap overlay */ +/* #undef SLAPD_OVER_RWM */ + +/* define for Sequential Modify overlay */ +/* #undef SLAPD_OVER_SEQMOD */ + +/* define for ServerSideSort/VLV overlay */ +/* #undef SLAPD_OVER_SSSVLV */ + +/* define for Syncrepl Provider overlay */ +/* #undef SLAPD_OVER_SYNCPROV */ + +/* define for Translucent Proxy overlay */ +/* #undef SLAPD_OVER_TRANSLUCENT */ + +/* define for Attribute Uniqueness overlay */ +/* #undef SLAPD_OVER_UNIQUE */ + +/* define for Value Sorting overlay */ +/* #undef SLAPD_OVER_VALSORT */ + +/* define to support PASSWD backend */ +/* #undef SLAPD_PASSWD */ + +/* define to support PERL backend */ +/* #undef SLAPD_PERL */ + +/* define to support relay backend */ +/* #undef SLAPD_RELAY */ + +/* define to support reverse lookups */ +/* #undef SLAPD_RLOOKUPS */ + +/* define to support SHELL backend */ +/* #undef SLAPD_SHELL */ + +/* define to support SOCK backend */ +/* #undef SLAPD_SOCK */ + +/* define to support SASL passwords */ +/* #undef SLAPD_SPASSWD */ + +/* define to support SQL backend */ +/* #undef SLAPD_SQL */ + +/* define to support WiredTiger backend */ +/* #undef SLAPD_WT */ + +/* define to support run-time loadable ACL */ +/* #undef SLAP_DYNACL */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define to 1 if you can safely include both and . 
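
   (That is, whether <sys/time.h> and <time.h> may both be included.) The
   canonical consumption idiom, shown here as a sketch rather than code
   taken from this patch:

       #ifdef TIME_WITH_SYS_TIME
       # include <sys/time.h>
       # include <time.h>
       #elif defined(HAVE_SYS_TIME_H)
       # include <sys/time.h>
       #else
       # include <time.h>
       #endif
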
*/ +#define TIME_WITH_SYS_TIME 1 + +/* Define to 1 if your declares `struct tm'. */ +/* #undef TM_IN_SYS_TIME */ + +/* set to urandom device */ +#define URANDOM_DEVICE "/dev/urandom" + +/* define to use OpenSSL BIGNUM for MP */ +/* #undef USE_MP_BIGNUM */ + +/* define to use GMP for MP */ +/* #undef USE_MP_GMP */ + +/* define to use 'long' for MP */ +/* #undef USE_MP_LONG */ + +/* define to use 'long long' for MP */ +/* #undef USE_MP_LONG_LONG */ + +/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most + significant byte first (like Motorola and SPARC, unlike Intel). */ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +/* # undef WORDS_BIGENDIAN */ +# endif +#endif + +/* Define to the type of arg 3 for `accept'. */ +#define ber_socklen_t socklen_t + +/* Define to `char *' if does not define. */ +/* #undef caddr_t */ + +/* Define to empty if `const' does not conform to ANSI C. */ +/* #undef const */ + +/* Define to `int' if doesn't define. */ +/* #undef gid_t */ + +/* Define to `int' if does not define. */ +/* #undef mode_t */ + +/* Define to `long' if does not define. */ +/* #undef off_t */ + +/* Define to `int' if does not define. */ +/* #undef pid_t */ + +/* Define to `int' if does not define. */ +/* #undef sig_atomic_t */ + +/* Define to `unsigned' if does not define. */ +/* #undef size_t */ + +/* define to snprintf routine */ +/* #undef snprintf */ + +/* Define like ber_socklen_t if does not define. */ +/* #undef socklen_t */ + +/* Define to `signed int' if does not define. */ +/* #undef ssize_t */ + +/* Define to `int' if doesn't define. */ +/* #undef uid_t */ + +/* define as empty if volatile is not supported */ +/* #undef volatile */ + +/* define to snprintf routine */ +/* #undef vsnprintf */ + + +/* begin of portable.h.post */ + +#ifdef _WIN32 +/* don't suck in all of the win32 api */ +# define WIN32_LEAN_AND_MEAN 1 +#endif + +#ifndef LDAP_NEEDS_PROTOTYPES +/* force LDAP_P to always include prototypes */ +#define LDAP_NEEDS_PROTOTYPES 1 +#endif + +#ifndef LDAP_REL_ENG +#if (LDAP_VENDOR_VERSION == 000000) && !defined(LDAP_DEVEL) +#define LDAP_DEVEL +#endif +#if defined(LDAP_DEVEL) && !defined(LDAP_TEST) +#define LDAP_TEST +#endif +#endif + +#ifdef HAVE_STDDEF_H +# include +#endif + +#ifdef HAVE_EBCDIC +/* ASCII/EBCDIC converting replacements for stdio funcs + * vsnprintf and snprintf are used too, but they are already + * checked by the configure script + */ +#define fputs ber_pvt_fputs +#define fgets ber_pvt_fgets +#define printf ber_pvt_printf +#define fprintf ber_pvt_fprintf +#define vfprintf ber_pvt_vfprintf +#define vsprintf ber_pvt_vsprintf +#endif + +#include "ac/fdset.h" + +#include "ldap_cdefs.h" +#include "ldap_features.h" + +#include "ac/assert.h" +#include "ac/localize.h" + +#endif /* _LDAP_PORTABLE_H */ +/* end of portable.h.post */ + diff --git a/contrib/openldap-cmake/linux_ppc64le/include/lber_types.h b/contrib/openldap-cmake/linux_ppc64le/include/lber_types.h new file mode 100644 index 00000000000..dbd59430527 --- /dev/null +++ b/contrib/openldap-cmake/linux_ppc64le/include/lber_types.h @@ -0,0 +1,63 @@ +/* include/lber_types.h. Generated from lber_types.hin by configure. */ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software . + * + * Copyright 1998-2020 The OpenLDAP Foundation. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * <http://www.OpenLDAP.org/license.html>. + */ + +/* + * LBER types + */ + +#ifndef _LBER_TYPES_H +#define _LBER_TYPES_H + +#include <ldap_cdefs.h> + +LDAP_BEGIN_DECL + +/* LBER boolean, enum, integers (32 bits or larger) */ +#define LBER_INT_T int + +/* LBER tags (32 bits or larger) */ +#define LBER_TAG_T long + +/* LBER socket descriptor */ +#define LBER_SOCKET_T int + +/* LBER lengths (32 bits or larger) */ +#define LBER_LEN_T long + +/* ------------------------------------------------------------ */ + +/* booleans, enumerations, and integers */ +typedef LBER_INT_T ber_int_t; + +/* signed and unsigned versions */ +typedef signed LBER_INT_T ber_sint_t; +typedef unsigned LBER_INT_T ber_uint_t; + +/* tags */ +typedef unsigned LBER_TAG_T ber_tag_t; + +/* "socket" descriptors */ +typedef LBER_SOCKET_T ber_socket_t; + +/* lengths */ +typedef unsigned LBER_LEN_T ber_len_t; + +/* signed lengths */ +typedef signed LBER_LEN_T ber_slen_t; + +LDAP_END_DECL + +#endif /* _LBER_TYPES_H */ diff --git a/contrib/openldap-cmake/linux_ppc64le/include/ldap_config.h b/contrib/openldap-cmake/linux_ppc64le/include/ldap_config.h new file mode 100644 index 00000000000..89f7b40b884 --- /dev/null +++ b/contrib/openldap-cmake/linux_ppc64le/include/ldap_config.h @@ -0,0 +1,74 @@ +/* include/ldap_config.h. Generated from ldap_config.hin by configure. */ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software <http://www.OpenLDAP.org/>. + * + * Copyright 1998-2020 The OpenLDAP Foundation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * <http://www.OpenLDAP.org/license.html>. + */ + +/* + * This file works in conjunction with OpenLDAP configure system. + * If you do not like the values below, adjust your configure options. + */ + +#ifndef _LDAP_CONFIG_H +#define _LDAP_CONFIG_H + +/* directory separator */ +#ifndef LDAP_DIRSEP +#ifndef _WIN32 +#define LDAP_DIRSEP "/" +#else +#define LDAP_DIRSEP "\\" +#endif +#endif + +/* directory for temporary files */ +#if defined(_WIN32) +# define LDAP_TMPDIR "C:\\."
/* we don't have much of a choice */ +#elif defined( _P_tmpdir ) +# define LDAP_TMPDIR _P_tmpdir +#elif defined( P_tmpdir ) +# define LDAP_TMPDIR P_tmpdir +#elif defined( _PATH_TMPDIR ) +# define LDAP_TMPDIR _PATH_TMPDIR +#else +# define LDAP_TMPDIR LDAP_DIRSEP "tmp" +#endif + +/* directories */ +#ifndef LDAP_BINDIR +#define LDAP_BINDIR "/tmp/ldap-prefix/bin" +#endif +#ifndef LDAP_SBINDIR +#define LDAP_SBINDIR "/tmp/ldap-prefix/sbin" +#endif +#ifndef LDAP_DATADIR +#define LDAP_DATADIR "/tmp/ldap-prefix/share/openldap" +#endif +#ifndef LDAP_SYSCONFDIR +#define LDAP_SYSCONFDIR "/tmp/ldap-prefix/etc/openldap" +#endif +#ifndef LDAP_LIBEXECDIR +#define LDAP_LIBEXECDIR "/tmp/ldap-prefix/libexec" +#endif +#ifndef LDAP_MODULEDIR +#define LDAP_MODULEDIR "/tmp/ldap-prefix/libexec/openldap" +#endif +#ifndef LDAP_RUNDIR +#define LDAP_RUNDIR "/tmp/ldap-prefix/var" +#endif +#ifndef LDAP_LOCALEDIR +#define LDAP_LOCALEDIR "" +#endif + + +#endif /* _LDAP_CONFIG_H */ diff --git a/contrib/openldap-cmake/linux_ppc64le/include/ldap_features.h b/contrib/openldap-cmake/linux_ppc64le/include/ldap_features.h new file mode 100644 index 00000000000..f0cc7c3626f --- /dev/null +++ b/contrib/openldap-cmake/linux_ppc64le/include/ldap_features.h @@ -0,0 +1,61 @@ +/* include/ldap_features.h. Generated from ldap_features.hin by configure. */ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software . + * + * Copyright 1998-2020 The OpenLDAP Foundation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +/* + * LDAP Features + */ + +#ifndef _LDAP_FEATURES_H +#define _LDAP_FEATURES_H 1 + +/* OpenLDAP API version macros */ +#define LDAP_VENDOR_VERSION 20501 +#define LDAP_VENDOR_VERSION_MAJOR 2 +#define LDAP_VENDOR_VERSION_MINOR 5 +#define LDAP_VENDOR_VERSION_PATCH X + +/* +** WORK IN PROGRESS! +** +** OpenLDAP reentrancy/thread-safeness should be dynamically +** checked using ldap_get_option(). +** +** The -lldap implementation is not thread-safe. +** +** The -lldap_r implementation is: +** LDAP_API_FEATURE_THREAD_SAFE (basic thread safety) +** but also be: +** LDAP_API_FEATURE_SESSION_THREAD_SAFE +** LDAP_API_FEATURE_OPERATION_THREAD_SAFE +** +** The preprocessor flag LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE +** can be used to determine if -lldap_r is available at compile +** time. You must define LDAP_THREAD_SAFE if and only if you +** link with -lldap_r. +** +** If you fail to define LDAP_THREAD_SAFE when linking with +** -lldap_r or define LDAP_THREAD_SAFE when linking with -lldap, +** provided header definitions and declarations may be incorrect. +** +*/ + +/* is -lldap_r available or not */ +#define LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE 1 + +/* LDAP v2 Referrals */ +/* #undef LDAP_API_FEATURE_X_OPENLDAP_V2_REFERRALS */ + +#endif /* LDAP_FEATURES */ diff --git a/contrib/openldap-cmake/linux_ppc64le/include/portable.h b/contrib/openldap-cmake/linux_ppc64le/include/portable.h new file mode 100644 index 00000000000..2924b6713a4 --- /dev/null +++ b/contrib/openldap-cmake/linux_ppc64le/include/portable.h @@ -0,0 +1,1169 @@ +/* include/portable.h. Generated from portable.hin by configure. */ +/* include/portable.hin. Generated from configure.in by autoheader. */ + + +/* begin of portable.h.pre */ +/* This work is part of OpenLDAP Software . 
+ * + * Copyright 1998-2020 The OpenLDAP Foundation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +#ifndef _LDAP_PORTABLE_H +#define _LDAP_PORTABLE_H + +/* define this if needed to get reentrant functions */ +#ifndef REENTRANT +#define REENTRANT 1 +#endif +#ifndef _REENTRANT +#define _REENTRANT 1 +#endif + +/* define this if needed to get threadsafe functions */ +#ifndef THREADSAFE +#define THREADSAFE 1 +#endif +#ifndef _THREADSAFE +#define _THREADSAFE 1 +#endif +#ifndef THREAD_SAFE +#define THREAD_SAFE 1 +#endif +#ifndef _THREAD_SAFE +#define _THREAD_SAFE 1 +#endif + +#ifndef _SGI_MP_SOURCE +#define _SGI_MP_SOURCE 1 +#endif + +/* end of portable.h.pre */ + + +/* Define if building universal (internal helper macro) */ +/* #undef AC_APPLE_UNIVERSAL_BUILD */ + +/* define to use both and */ +/* #undef BOTH_STRINGS_H */ + +/* define if cross compiling */ +/* #undef CROSS_COMPILING */ + +/* set to the number of arguments ctime_r() expects */ +#define CTIME_R_NARGS 2 + +/* define if toupper() requires islower() */ +/* #undef C_UPPER_LOWER */ + +/* define if sys_errlist is not declared in stdio.h or errno.h */ +/* #undef DECL_SYS_ERRLIST */ + +/* define to enable slapi library */ +/* #undef ENABLE_SLAPI */ + +/* defined to be the EXE extension */ +#define EXEEXT "" + +/* set to the number of arguments gethostbyaddr_r() expects */ +#define GETHOSTBYADDR_R_NARGS 8 + +/* set to the number of arguments gethostbyname_r() expects */ +#define GETHOSTBYNAME_R_NARGS 6 + +/* Define to 1 if `TIOCGWINSZ' requires . */ +#define GWINSZ_IN_SYS_IOCTL 1 + +/* define if you have AIX security lib */ +/* #undef HAVE_AIX_SECURITY */ + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_INET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_NAMESER_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ASSERT_H 1 + +/* Define to 1 if you have the `bcopy' function. */ +#define HAVE_BCOPY 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_BITS_TYPES_H 1 + +/* Define to 1 if you have the `chroot' function. */ +#define HAVE_CHROOT 1 + +/* Define to 1 if you have the `closesocket' function. */ +/* #undef HAVE_CLOSESOCKET */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_CONIO_H */ + +/* define if crypt(3) is available */ +/* #undef HAVE_CRYPT */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_CRYPT_H */ + +/* define if crypt_r() is also available */ +/* #undef HAVE_CRYPT_R */ + +/* Define to 1 if you have the `ctime_r' function. */ +#define HAVE_CTIME_R 1 + +/* define if you have Cyrus SASL */ +/* #undef HAVE_CYRUS_SASL */ + +/* define if your system supports /dev/poll */ +/* #undef HAVE_DEVPOLL */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DIRECT_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +#define HAVE_DIRENT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */ +/* #undef HAVE_DOPRNT */ + +/* define if system uses EBCDIC instead of ASCII */ +/* #undef HAVE_EBCDIC */ + +/* Define to 1 if you have the `endgrent' function. 
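
   GETHOSTBYADDR_R_NARGS and GETHOSTBYNAME_R_NARGS capture which of the
   incompatible historical signatures the platform ships (the glibc 8- and
   6-argument forms on this target). A hedged sketch of the dispatch idiom
   (name, hent, buf, result and herr are hypothetical locals):

       #if GETHOSTBYNAME_R_NARGS == 6
           rc = gethostbyname_r(name, &hent, buf, sizeof(buf), &result, &herr);
       #elif GETHOSTBYNAME_R_NARGS == 5
           result = gethostbyname_r(name, &hent, buf, sizeof(buf), &herr);
       #endif
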
*/ +#define HAVE_ENDGRENT 1 + +/* Define to 1 if you have the `endpwent' function. */ +#define HAVE_ENDPWENT 1 + +/* define if your system supports epoll */ +#define HAVE_EPOLL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ERRNO_H 1 + +/* Define to 1 if you have the `fcntl' function. */ +#define HAVE_FCNTL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_FCNTL_H 1 + +/* define if you actually have FreeBSD fetch(3) */ +/* #undef HAVE_FETCH */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_FILIO_H */ + +/* Define to 1 if you have the `flock' function. */ +#define HAVE_FLOCK 1 + +/* Define to 1 if you have the `fstat' function. */ +#define HAVE_FSTAT 1 + +/* Define to 1 if you have the `gai_strerror' function. */ +#define HAVE_GAI_STRERROR 1 + +/* Define to 1 if you have the `getaddrinfo' function. */ +#define HAVE_GETADDRINFO 1 + +/* Define to 1 if you have the `getdtablesize' function. */ +#define HAVE_GETDTABLESIZE 1 + +/* Define to 1 if you have the `geteuid' function. */ +#define HAVE_GETEUID 1 + +/* Define to 1 if you have the `getgrgid' function. */ +#define HAVE_GETGRGID 1 + +/* Define to 1 if you have the `gethostbyaddr_r' function. */ +#define HAVE_GETHOSTBYADDR_R 1 + +/* Define to 1 if you have the `gethostbyname_r' function. */ +#define HAVE_GETHOSTBYNAME_R 1 + +/* Define to 1 if you have the `gethostname' function. */ +#define HAVE_GETHOSTNAME 1 + +/* Define to 1 if you have the `getnameinfo' function. */ +#define HAVE_GETNAMEINFO 1 + +/* Define to 1 if you have the `getopt' function. */ +#define HAVE_GETOPT 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_GETOPT_H 1 + +/* Define to 1 if you have the `getpassphrase' function. */ +/* #undef HAVE_GETPASSPHRASE */ + +/* Define to 1 if you have the `getpeereid' function. */ +/* #undef HAVE_GETPEEREID */ + +/* Define to 1 if you have the `getpeerucred' function. */ +/* #undef HAVE_GETPEERUCRED */ + +/* Define to 1 if you have the `getpwnam' function. */ +#define HAVE_GETPWNAM 1 + +/* Define to 1 if you have the `getpwuid' function. */ +#define HAVE_GETPWUID 1 + +/* Define to 1 if you have the `getspnam' function. */ +#define HAVE_GETSPNAM 1 + +/* Define to 1 if you have the `gettimeofday' function. */ +#define HAVE_GETTIMEOFDAY 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_GMP_H */ + +/* Define to 1 if you have the `gmtime_r' function. */ +#define HAVE_GMTIME_R 1 + +/* define if you have GNUtls */ +/* #undef HAVE_GNUTLS */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_GNUTLS_GNUTLS_H */ + +/* if you have GNU Pth */ +/* #undef HAVE_GNU_PTH */ + +/* Define to 1 if you have the header file. */ +#define HAVE_GRP_H 1 + +/* Define to 1 if you have the `hstrerror' function. */ +#define HAVE_HSTRERROR 1 + +/* define to you inet_aton(3) is available */ +#define HAVE_INET_ATON 1 + +/* Define to 1 if you have the `inet_ntoa_b' function. */ +/* #undef HAVE_INET_NTOA_B */ + +/* Define to 1 if you have the `inet_ntop' function. */ +#define HAVE_INET_NTOP 1 + +/* Define to 1 if you have the `initgroups' function. */ +#define HAVE_INITGROUPS 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define to 1 if you have the `ioctl' function. */ +#define HAVE_IOCTL 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_IO_H */ + +/* define if your system supports kqueue */ +/* #undef HAVE_KQUEUE */ + +/* Define to 1 if you have the `gen' library (-lgen). 
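
   Note how this linux_ppc64le header inverts the event-notification probes
   of the first portable.h in this patch (which sets HAVE_KQUEUE and
   HAVE_GETPEEREID, a BSD/Darwin signature): here HAVE_EPOLL is set while
   HAVE_KQUEUE stays undefined. Event loops typically pick a backend in
   preference order; a hedged sketch (epfd and kq are hypothetical):

       #if defined(HAVE_EPOLL)
           epfd = epoll_create1(0);   // Linux
       #elif defined(HAVE_KQUEUE)
           kq = kqueue();             // FreeBSD, macOS and friends
       #else
           // fall back to poll(2) or select(2)
       #endif
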
*/ +/* #undef HAVE_LIBGEN */ + +/* Define to 1 if you have the `gmp' library (-lgmp). */ +/* #undef HAVE_LIBGMP */ + +/* Define to 1 if you have the `inet' library (-linet). */ +/* #undef HAVE_LIBINET */ + +/* define if you have libtool -ltdl */ +/* #undef HAVE_LIBLTDL */ + +/* Define to 1 if you have the `net' library (-lnet). */ +/* #undef HAVE_LIBNET */ + +/* Define to 1 if you have the `nsl' library (-lnsl). */ +/* #undef HAVE_LIBNSL */ + +/* Define to 1 if you have the `nsl_s' library (-lnsl_s). */ +/* #undef HAVE_LIBNSL_S */ + +/* Define to 1 if you have the `socket' library (-lsocket). */ +/* #undef HAVE_LIBSOCKET */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_LIBUTIL_H */ + +/* Define to 1 if you have the `V3' library (-lV3). */ +/* #undef HAVE_LIBV3 */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LIMITS_H 1 + +/* if you have LinuxThreads */ +/* #undef HAVE_LINUX_THREADS */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LOCALE_H 1 + +/* Define to 1 if you have the `localtime_r' function. */ +#define HAVE_LOCALTIME_R 1 + +/* Define to 1 if you have the `lockf' function. */ +#define HAVE_LOCKF 1 + +/* Define to 1 if the system has the type `long long'. */ +#define HAVE_LONG_LONG 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_LTDL_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_MALLOC_H 1 + +/* Define to 1 if you have the `memcpy' function. */ +#define HAVE_MEMCPY 1 + +/* Define to 1 if you have the `memmove' function. */ +#define HAVE_MEMMOVE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `memrchr' function. */ +#define HAVE_MEMRCHR 1 + +/* Define to 1 if you have the `mkstemp' function. */ +#define HAVE_MKSTEMP 1 + +/* Define to 1 if you have the `mktemp' function. */ +#define HAVE_MKTEMP 1 + +/* define this if you have mkversion */ +#define HAVE_MKVERSION 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. */ +/* #undef HAVE_NDIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_NETINET_TCP_H 1 + +/* define if strerror_r returns char* instead of int */ +/* #undef HAVE_NONPOSIX_STRERROR_R */ + +/* if you have NT Event Log */ +/* #undef HAVE_NT_EVENT_LOG */ + +/* if you have NT Service Manager */ +/* #undef HAVE_NT_SERVICE_MANAGER */ + +/* if you have NT Threads */ +/* #undef HAVE_NT_THREADS */ + +/* define if you have OpenSSL */ +#define HAVE_OPENSSL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_BN_H 1 + +/* define if you have OpenSSL with CRL checking capability */ +#define HAVE_OPENSSL_CRL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_CRYPTO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_SSL_H 1 + +/* Define to 1 if you have the `pipe' function. */ +#define HAVE_PIPE 1 + +/* Define to 1 if you have the `poll' function. */ +#define HAVE_POLL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_POLL_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PROCESS_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PSAP_H */ + +/* define to pthreads API spec revision */ +#define HAVE_PTHREADS 10 + +/* define if you have pthread_detach function */ +#define HAVE_PTHREAD_DETACH 1 + +/* Define to 1 if you have the `pthread_getconcurrency' function. */ +#define HAVE_PTHREAD_GETCONCURRENCY 1 + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_PTHREAD_H 1 + +/* Define to 1 if you have the `pthread_kill' function. */ +#define HAVE_PTHREAD_KILL 1 + +/* Define to 1 if you have the `pthread_kill_other_threads_np' function. */ +/* #undef HAVE_PTHREAD_KILL_OTHER_THREADS_NP */ + +/* define if you have pthread_rwlock_destroy function */ +#define HAVE_PTHREAD_RWLOCK_DESTROY 1 + +/* Define to 1 if you have the `pthread_setconcurrency' function. */ +#define HAVE_PTHREAD_SETCONCURRENCY 1 + +/* Define to 1 if you have the `pthread_yield' function. */ +#define HAVE_PTHREAD_YIELD 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PTH_H */ + +/* Define to 1 if the system has the type `ptrdiff_t'. */ +#define HAVE_PTRDIFF_T 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_PWD_H 1 + +/* Define to 1 if you have the `read' function. */ +#define HAVE_READ 1 + +/* Define to 1 if you have the `recv' function. */ +#define HAVE_RECV 1 + +/* Define to 1 if you have the `recvfrom' function. */ +#define HAVE_RECVFROM 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_REGEX_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_RESOLV_H */ + +/* define if you have res_query() */ +/* #undef HAVE_RES_QUERY */ + +/* define if OpenSSL needs RSAref */ +/* #undef HAVE_RSAREF */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SASL_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SASL_SASL_H */ + +/* define if your SASL library has sasl_version() */ +/* #undef HAVE_SASL_VERSION */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SCHED_H 1 + +/* Define to 1 if you have the `sched_yield' function. */ +#define HAVE_SCHED_YIELD 1 + +/* Define to 1 if you have the `send' function. */ +#define HAVE_SEND 1 + +/* Define to 1 if you have the `sendmsg' function. */ +#define HAVE_SENDMSG 1 + +/* Define to 1 if you have the `sendto' function. */ +#define HAVE_SENDTO 1 + +/* Define to 1 if you have the `setegid' function. */ +#define HAVE_SETEGID 1 + +/* Define to 1 if you have the `seteuid' function. */ +#define HAVE_SETEUID 1 + +/* Define to 1 if you have the `setgid' function. */ +#define HAVE_SETGID 1 + +/* Define to 1 if you have the `setpwfile' function. */ +/* #undef HAVE_SETPWFILE */ + +/* Define to 1 if you have the `setsid' function. */ +#define HAVE_SETSID 1 + +/* Define to 1 if you have the `setuid' function. */ +#define HAVE_SETUID 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SGTTY_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SHADOW_H */ + +/* Define to 1 if you have the `sigaction' function. */ +#define HAVE_SIGACTION 1 + +/* Define to 1 if you have the `signal' function. */ +#define HAVE_SIGNAL 1 + +/* Define to 1 if you have the `sigset' function. */ +#define HAVE_SIGSET 1 + +/* define if you have -lslp */ +/* #undef HAVE_SLP */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SLP_H */ + +/* Define to 1 if you have the `snprintf' function. */ +#define HAVE_SNPRINTF 1 + +/* if you have spawnlp() */ +/* #undef HAVE_SPAWNLP */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SQLEXT_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SQL_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_STDDEF_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the `strdup' function. 
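
   With HAVE_SCHED_YIELD and HAVE_PTHREAD_YIELD both defined on this
   target, a thread-yield wrapper can prefer the POSIX call and keep the
   older name as a fallback. One plausible preference order, sketched here
   as an assumption rather than OpenLDAP's own wrapper:

       #if defined(HAVE_SCHED_YIELD)
           sched_yield();             // POSIX
       #elif defined(HAVE_PTHREAD_YIELD)
           pthread_yield();           // older, non-standard name
       #endif
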
*/ +#define HAVE_STRDUP 1 + +/* Define to 1 if you have the `strerror' function. */ +#define HAVE_STRERROR 1 + +/* Define to 1 if you have the `strerror_r' function. */ +#define HAVE_STRERROR_R 1 + +/* Define to 1 if you have the `strftime' function. */ +#define HAVE_STRFTIME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the `strpbrk' function. */ +#define HAVE_STRPBRK 1 + +/* Define to 1 if you have the `strrchr' function. */ +#define HAVE_STRRCHR 1 + +/* Define to 1 if you have the `strsep' function. */ +#define HAVE_STRSEP 1 + +/* Define to 1 if you have the `strspn' function. */ +#define HAVE_STRSPN 1 + +/* Define to 1 if you have the `strstr' function. */ +#define HAVE_STRSTR 1 + +/* Define to 1 if you have the `strtol' function. */ +#define HAVE_STRTOL 1 + +/* Define to 1 if you have the `strtoll' function. */ +#define HAVE_STRTOLL 1 + +/* Define to 1 if you have the `strtoq' function. */ +#define HAVE_STRTOQ 1 + +/* Define to 1 if you have the `strtoul' function. */ +#define HAVE_STRTOUL 1 + +/* Define to 1 if you have the `strtoull' function. */ +#define HAVE_STRTOULL 1 + +/* Define to 1 if you have the `strtouq' function. */ +#define HAVE_STRTOUQ 1 + +/* Define to 1 if `msg_accrightslen' is a member of `struct msghdr'. */ +/* #undef HAVE_STRUCT_MSGHDR_MSG_ACCRIGHTSLEN */ + +/* Define to 1 if `msg_control' is a member of `struct msghdr'. */ +#define HAVE_STRUCT_MSGHDR_MSG_CONTROL 1 + +/* Define to 1 if `pw_gecos' is a member of `struct passwd'. */ +#define HAVE_STRUCT_PASSWD_PW_GECOS 1 + +/* Define to 1 if `pw_passwd' is a member of `struct passwd'. */ +#define HAVE_STRUCT_PASSWD_PW_PASSWD 1 + +/* Define to 1 if `st_blksize' is a member of `struct stat'. */ +#define HAVE_STRUCT_STAT_ST_BLKSIZE 1 + +/* Define to 1 if `st_fstype' is a member of `struct stat'. */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE */ + +/* define to 1 if st_fstype is char * */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE_CHAR */ + +/* define to 1 if st_fstype is int */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE_INT */ + +/* Define to 1 if `st_vfstype' is a member of `struct stat'. */ +/* #undef HAVE_STRUCT_STAT_ST_VFSTYPE */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYNCH_H */ + +/* Define to 1 if you have the `sysconf' function. */ +#define HAVE_SYSCONF 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYSEXITS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYSLOG_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_DEVPOLL_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_DIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_EPOLL_H 1 + +/* define if you actually have sys_errlist in your libs */ +#define HAVE_SYS_ERRLIST 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_ERRNO_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_EVENT_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_FILE_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_FILIO_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_FSTYP_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_IOCTL_H 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_NDIR_H */ + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_SYS_PARAM_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_POLL_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_PRIVGRP_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_RESOURCE_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SELECT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SOCKET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SYSLOG_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_UCRED_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UIO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UN_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_UUID_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_VMOUNT_H */ + +/* Define to 1 if you have that is POSIX.1 compatible. */ +#define HAVE_SYS_WAIT_H 1 + +/* define if you have -lwrap */ +/* #undef HAVE_TCPD */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_TCPD_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_TERMIOS_H 1 + +/* if you have Solaris LWP (thr) package */ +/* #undef HAVE_THR */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_THREAD_H */ + +/* Define to 1 if you have the `thr_getconcurrency' function. */ +/* #undef HAVE_THR_GETCONCURRENCY */ + +/* Define to 1 if you have the `thr_setconcurrency' function. */ +/* #undef HAVE_THR_SETCONCURRENCY */ + +/* Define to 1 if you have the `thr_yield' function. */ +/* #undef HAVE_THR_YIELD */ + +/* define if you have TLS */ +#define HAVE_TLS 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UTIME_H 1 + +/* define if you have uuid_generate() */ +/* #undef HAVE_UUID_GENERATE */ + +/* define if you have uuid_to_str() */ +/* #undef HAVE_UUID_TO_STR */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_UUID_UUID_H */ + +/* Define to 1 if you have the `vprintf' function. */ +#define HAVE_VPRINTF 1 + +/* Define to 1 if you have the `vsnprintf' function. */ +#define HAVE_VSNPRINTF 1 + +/* Define to 1 if you have the `wait4' function. */ +#define HAVE_WAIT4 1 + +/* Define to 1 if you have the `waitpid' function. */ +#define HAVE_WAITPID 1 + +/* define if you have winsock */ +/* #undef HAVE_WINSOCK */ + +/* define if you have winsock2 */ +/* #undef HAVE_WINSOCK2 */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WINSOCK2_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WINSOCK_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WIREDTIGER_H */ + +/* Define to 1 if you have the `write' function. */ +#define HAVE_WRITE 1 + +/* define if select implicitly yields */ +#define HAVE_YIELDING_SELECT 1 + +/* Define to 1 if you have the `_vsnprintf' function. 
*/ +/* #undef HAVE__VSNPRINTF */ + +/* define to 32-bit or greater integer type */ +#define LBER_INT_T int + +/* define to large integer type */ +#define LBER_LEN_T long + +/* define to socket descriptor type */ +#define LBER_SOCKET_T int + +/* define to large integer type */ +#define LBER_TAG_T long + +/* define to 1 if library is thread safe */ +#define LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE 1 + +/* define to LDAP VENDOR VERSION */ +/* #undef LDAP_API_FEATURE_X_OPENLDAP_V2_REFERRALS */ + +/* define this to add debugging code */ +/* #undef LDAP_DEBUG */ + +/* define if LDAP libs are dynamic */ +/* #undef LDAP_LIBS_DYNAMIC */ + +/* define to support PF_INET6 */ +#define LDAP_PF_INET6 1 + +/* define to support PF_LOCAL */ +#define LDAP_PF_LOCAL 1 + +/* define this to add SLAPI code */ +/* #undef LDAP_SLAPI */ + +/* define this to add syslog code */ +/* #undef LDAP_SYSLOG */ + +/* Version */ +#define LDAP_VENDOR_VERSION 20501 + +/* Major */ +#define LDAP_VENDOR_VERSION_MAJOR 2 + +/* Minor */ +#define LDAP_VENDOR_VERSION_MINOR 5 + +/* Patch */ +#define LDAP_VENDOR_VERSION_PATCH X + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* define if memcmp is not 8-bit clean or is otherwise broken */ +/* #undef NEED_MEMCMP_REPLACEMENT */ + +/* define if you have (or want) no threads */ +/* #undef NO_THREADS */ + +/* define to use the original debug style */ +/* #undef OLD_DEBUG */ + +/* Package */ +#define OPENLDAP_PACKAGE "OpenLDAP" + +/* Version */ +#define OPENLDAP_VERSION "2.5.X" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "" + +/* define if sched_yield yields the entire process */ +/* #undef REPLACE_BROKEN_YIELD */ + +/* Define as the return type of signal handlers (`int' or `void'). */ +#define RETSIGTYPE void + +/* Define to the type of arg 1 for `select'. */ +#define SELECT_TYPE_ARG1 int + +/* Define to the type of args 2, 3 and 4 for `select'. */ +#define SELECT_TYPE_ARG234 (fd_set *) + +/* Define to the type of arg 5 for `select'. */ +#define SELECT_TYPE_ARG5 (struct timeval *) + +/* The size of `int', as computed by sizeof. */ +#define SIZEOF_INT 4 + +/* The size of `long', as computed by sizeof. */ +#define SIZEOF_LONG 8 + +/* The size of `long long', as computed by sizeof. */ +#define SIZEOF_LONG_LONG 8 + +/* The size of `short', as computed by sizeof. */ +#define SIZEOF_SHORT 2 + +/* The size of `wchar_t', as computed by sizeof. 
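
   The LBER_* defines above mirror the typedefs in the generated
   lber_types.h earlier in this patch (ber_int_t, ber_tag_t, ber_len_t,
   ber_socket_t). A compile-time check of the documented "32 bits or
   larger" contract, as a hedged illustration (the typedef name below is
   invented for this sketch):

       typedef char lber_int_is_wide_enough[sizeof(LBER_INT_T) * 8 >= 32 ? 1 : -1];
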
*/ +#define SIZEOF_WCHAR_T 4 + +/* define to support per-object ACIs */ +/* #undef SLAPD_ACI_ENABLED */ + +/* define to support LDAP Async Metadirectory backend */ +/* #undef SLAPD_ASYNCMETA */ + +/* define to support cleartext passwords */ +/* #undef SLAPD_CLEARTEXT */ + +/* define to support crypt(3) passwords */ +/* #undef SLAPD_CRYPT */ + +/* define to support DNS SRV backend */ +/* #undef SLAPD_DNSSRV */ + +/* define to support LDAP backend */ +/* #undef SLAPD_LDAP */ + +/* define to support MDB backend */ +/* #undef SLAPD_MDB */ + +/* define to support LDAP Metadirectory backend */ +/* #undef SLAPD_META */ + +/* define to support modules */ +/* #undef SLAPD_MODULES */ + +/* dynamically linked module */ +#define SLAPD_MOD_DYNAMIC 2 + +/* statically linked module */ +#define SLAPD_MOD_STATIC 1 + +/* define to support cn=Monitor backend */ +/* #undef SLAPD_MONITOR */ + +/* define to support NDB backend */ +/* #undef SLAPD_NDB */ + +/* define to support NULL backend */ +/* #undef SLAPD_NULL */ + +/* define for In-Directory Access Logging overlay */ +/* #undef SLAPD_OVER_ACCESSLOG */ + +/* define for Audit Logging overlay */ +/* #undef SLAPD_OVER_AUDITLOG */ + +/* define for Automatic Certificate Authority overlay */ +/* #undef SLAPD_OVER_AUTOCA */ + +/* define for Collect overlay */ +/* #undef SLAPD_OVER_COLLECT */ + +/* define for Attribute Constraint overlay */ +/* #undef SLAPD_OVER_CONSTRAINT */ + +/* define for Dynamic Directory Services overlay */ +/* #undef SLAPD_OVER_DDS */ + +/* define for Dynamic Directory Services overlay */ +/* #undef SLAPD_OVER_DEREF */ + +/* define for Dynamic Group overlay */ +/* #undef SLAPD_OVER_DYNGROUP */ + +/* define for Dynamic List overlay */ +/* #undef SLAPD_OVER_DYNLIST */ + +/* define for Reverse Group Membership overlay */ +/* #undef SLAPD_OVER_MEMBEROF */ + +/* define for Password Policy overlay */ +/* #undef SLAPD_OVER_PPOLICY */ + +/* define for Proxy Cache overlay */ +/* #undef SLAPD_OVER_PROXYCACHE */ + +/* define for Referential Integrity overlay */ +/* #undef SLAPD_OVER_REFINT */ + +/* define for Return Code overlay */ +/* #undef SLAPD_OVER_RETCODE */ + +/* define for Rewrite/Remap overlay */ +/* #undef SLAPD_OVER_RWM */ + +/* define for Sequential Modify overlay */ +/* #undef SLAPD_OVER_SEQMOD */ + +/* define for ServerSideSort/VLV overlay */ +/* #undef SLAPD_OVER_SSSVLV */ + +/* define for Syncrepl Provider overlay */ +/* #undef SLAPD_OVER_SYNCPROV */ + +/* define for Translucent Proxy overlay */ +/* #undef SLAPD_OVER_TRANSLUCENT */ + +/* define for Attribute Uniqueness overlay */ +/* #undef SLAPD_OVER_UNIQUE */ + +/* define for Value Sorting overlay */ +/* #undef SLAPD_OVER_VALSORT */ + +/* define to support PASSWD backend */ +/* #undef SLAPD_PASSWD */ + +/* define to support PERL backend */ +/* #undef SLAPD_PERL */ + +/* define to support relay backend */ +/* #undef SLAPD_RELAY */ + +/* define to support reverse lookups */ +/* #undef SLAPD_RLOOKUPS */ + +/* define to support SHELL backend */ +/* #undef SLAPD_SHELL */ + +/* define to support SOCK backend */ +/* #undef SLAPD_SOCK */ + +/* define to support SASL passwords */ +/* #undef SLAPD_SPASSWD */ + +/* define to support SQL backend */ +/* #undef SLAPD_SQL */ + +/* define to support WiredTiger backend */ +/* #undef SLAPD_WT */ + +/* define to support run-time loadable ACL */ +/* #undef SLAP_DYNACL */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define to 1 if you can safely include both and . 
*/ +#define TIME_WITH_SYS_TIME 1 + +/* Define to 1 if your declares `struct tm'. */ +/* #undef TM_IN_SYS_TIME */ + +/* set to urandom device */ +#define URANDOM_DEVICE "/dev/urandom" + +/* define to use OpenSSL BIGNUM for MP */ +/* #undef USE_MP_BIGNUM */ + +/* define to use GMP for MP */ +/* #undef USE_MP_GMP */ + +/* define to use 'long' for MP */ +/* #undef USE_MP_LONG */ + +/* define to use 'long long' for MP */ +/* #undef USE_MP_LONG_LONG */ + +/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most + significant byte first (like Motorola and SPARC, unlike Intel). */ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +/* # undef WORDS_BIGENDIAN */ +# endif +#endif + +/* Define to the type of arg 3 for `accept'. */ +#define ber_socklen_t socklen_t + +/* Define to `char *' if does not define. */ +/* #undef caddr_t */ + +/* Define to empty if `const' does not conform to ANSI C. */ +/* #undef const */ + +/* Define to `int' if doesn't define. */ +/* #undef gid_t */ + +/* Define to `int' if does not define. */ +/* #undef mode_t */ + +/* Define to `long' if does not define. */ +/* #undef off_t */ + +/* Define to `int' if does not define. */ +/* #undef pid_t */ + +/* Define to `int' if does not define. */ +/* #undef sig_atomic_t */ + +/* Define to `unsigned' if does not define. */ +/* #undef size_t */ + +/* define to snprintf routine */ +/* #undef snprintf */ + +/* Define like ber_socklen_t if does not define. */ +/* #undef socklen_t */ + +/* Define to `signed int' if does not define. */ +/* #undef ssize_t */ + +/* Define to `int' if doesn't define. */ +/* #undef uid_t */ + +/* define as empty if volatile is not supported */ +/* #undef volatile */ + +/* define to snprintf routine */ +/* #undef vsnprintf */ + + +/* begin of portable.h.post */ + +#ifdef _WIN32 +/* don't suck in all of the win32 api */ +# define WIN32_LEAN_AND_MEAN 1 +#endif + +#ifndef LDAP_NEEDS_PROTOTYPES +/* force LDAP_P to always include prototypes */ +#define LDAP_NEEDS_PROTOTYPES 1 +#endif + +#ifndef LDAP_REL_ENG +#if (LDAP_VENDOR_VERSION == 000000) && !defined(LDAP_DEVEL) +#define LDAP_DEVEL +#endif +#if defined(LDAP_DEVEL) && !defined(LDAP_TEST) +#define LDAP_TEST +#endif +#endif + +#ifdef HAVE_STDDEF_H +# include +#endif + +#ifdef HAVE_EBCDIC +/* ASCII/EBCDIC converting replacements for stdio funcs + * vsnprintf and snprintf are used too, but they are already + * checked by the configure script + */ +#define fputs ber_pvt_fputs +#define fgets ber_pvt_fgets +#define printf ber_pvt_printf +#define fprintf ber_pvt_fprintf +#define vfprintf ber_pvt_vfprintf +#define vsprintf ber_pvt_vsprintf +#endif + +#include "ac/fdset.h" + +#include "ldap_cdefs.h" +#include "ldap_features.h" + +#include "ac/assert.h" +#include "ac/localize.h" + +#endif /* _LDAP_PORTABLE_H */ +/* end of portable.h.post */ + diff --git a/contrib/poco b/contrib/poco index 83beecccb09..59945069080 160000 --- a/contrib/poco +++ b/contrib/poco @@ -1 +1 @@ -Subproject commit 83beecccb09eec0c9fd2669cacea03ede1d9f138 +Subproject commit 5994506908028612869fee627d68d8212dfe7c1e diff --git a/contrib/poco-cmake/CMakeLists.txt b/contrib/poco-cmake/CMakeLists.txt index 1d2dc7b873e..d173f35b9bf 100644 --- a/contrib/poco-cmake/CMakeLists.txt +++ b/contrib/poco-cmake/CMakeLists.txt @@ -1,4 +1,4 @@ -set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/poco) +set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/poco") add_subdirectory (Crypto) add_subdirectory (Data) diff 
--git a/contrib/poco-cmake/Crypto/CMakeLists.txt b/contrib/poco-cmake/Crypto/CMakeLists.txt index 1685e96728b..e93ed5cf17d 100644 --- a/contrib/poco-cmake/Crypto/CMakeLists.txt +++ b/contrib/poco-cmake/Crypto/CMakeLists.txt @@ -1,35 +1,35 @@ if (ENABLE_SSL) if (USE_INTERNAL_POCO_LIBRARY) set (SRCS - ${LIBRARY_DIR}/Crypto/src/Cipher.cpp - ${LIBRARY_DIR}/Crypto/src/CipherFactory.cpp - ${LIBRARY_DIR}/Crypto/src/CipherImpl.cpp - ${LIBRARY_DIR}/Crypto/src/CipherKey.cpp - ${LIBRARY_DIR}/Crypto/src/CipherKeyImpl.cpp - ${LIBRARY_DIR}/Crypto/src/CryptoException.cpp - ${LIBRARY_DIR}/Crypto/src/CryptoStream.cpp - ${LIBRARY_DIR}/Crypto/src/CryptoTransform.cpp - ${LIBRARY_DIR}/Crypto/src/DigestEngine.cpp - ${LIBRARY_DIR}/Crypto/src/ECDSADigestEngine.cpp - ${LIBRARY_DIR}/Crypto/src/ECKey.cpp - ${LIBRARY_DIR}/Crypto/src/ECKeyImpl.cpp - ${LIBRARY_DIR}/Crypto/src/EVPPKey.cpp - ${LIBRARY_DIR}/Crypto/src/KeyPair.cpp - ${LIBRARY_DIR}/Crypto/src/KeyPairImpl.cpp - ${LIBRARY_DIR}/Crypto/src/OpenSSLInitializer.cpp - ${LIBRARY_DIR}/Crypto/src/PKCS12Container.cpp - ${LIBRARY_DIR}/Crypto/src/RSACipherImpl.cpp - ${LIBRARY_DIR}/Crypto/src/RSADigestEngine.cpp - ${LIBRARY_DIR}/Crypto/src/RSAKey.cpp - ${LIBRARY_DIR}/Crypto/src/RSAKeyImpl.cpp - ${LIBRARY_DIR}/Crypto/src/X509Certificate.cpp + "${LIBRARY_DIR}/Crypto/src/Cipher.cpp" + "${LIBRARY_DIR}/Crypto/src/CipherFactory.cpp" + "${LIBRARY_DIR}/Crypto/src/CipherImpl.cpp" + "${LIBRARY_DIR}/Crypto/src/CipherKey.cpp" + "${LIBRARY_DIR}/Crypto/src/CipherKeyImpl.cpp" + "${LIBRARY_DIR}/Crypto/src/CryptoException.cpp" + "${LIBRARY_DIR}/Crypto/src/CryptoStream.cpp" + "${LIBRARY_DIR}/Crypto/src/CryptoTransform.cpp" + "${LIBRARY_DIR}/Crypto/src/DigestEngine.cpp" + "${LIBRARY_DIR}/Crypto/src/ECDSADigestEngine.cpp" + "${LIBRARY_DIR}/Crypto/src/ECKey.cpp" + "${LIBRARY_DIR}/Crypto/src/ECKeyImpl.cpp" + "${LIBRARY_DIR}/Crypto/src/EVPPKey.cpp" + "${LIBRARY_DIR}/Crypto/src/KeyPair.cpp" + "${LIBRARY_DIR}/Crypto/src/KeyPairImpl.cpp" + "${LIBRARY_DIR}/Crypto/src/OpenSSLInitializer.cpp" + "${LIBRARY_DIR}/Crypto/src/PKCS12Container.cpp" + "${LIBRARY_DIR}/Crypto/src/RSACipherImpl.cpp" + "${LIBRARY_DIR}/Crypto/src/RSADigestEngine.cpp" + "${LIBRARY_DIR}/Crypto/src/RSAKey.cpp" + "${LIBRARY_DIR}/Crypto/src/RSAKeyImpl.cpp" + "${LIBRARY_DIR}/Crypto/src/X509Certificate.cpp" ) add_library (_poco_crypto ${SRCS}) add_library (Poco::Crypto ALIAS _poco_crypto) target_compile_options (_poco_crypto PRIVATE -Wno-newline-eof) - target_include_directories (_poco_crypto SYSTEM PUBLIC ${LIBRARY_DIR}/Crypto/include) + target_include_directories (_poco_crypto SYSTEM PUBLIC "${LIBRARY_DIR}/Crypto/include") target_link_libraries (_poco_crypto PUBLIC Poco::Foundation ssl crypto) else () add_library (Poco::Crypto UNKNOWN IMPORTED GLOBAL) diff --git a/contrib/poco-cmake/Data/CMakeLists.txt b/contrib/poco-cmake/Data/CMakeLists.txt index 1c185df8961..4fdd755b45d 100644 --- a/contrib/poco-cmake/Data/CMakeLists.txt +++ b/contrib/poco-cmake/Data/CMakeLists.txt @@ -1,40 +1,40 @@ if (USE_INTERNAL_POCO_LIBRARY) set (SRCS - ${LIBRARY_DIR}/Data/src/AbstractBinder.cpp - ${LIBRARY_DIR}/Data/src/AbstractBinding.cpp - ${LIBRARY_DIR}/Data/src/AbstractExtraction.cpp - ${LIBRARY_DIR}/Data/src/AbstractExtractor.cpp - ${LIBRARY_DIR}/Data/src/AbstractPreparation.cpp - ${LIBRARY_DIR}/Data/src/AbstractPreparator.cpp - ${LIBRARY_DIR}/Data/src/ArchiveStrategy.cpp - ${LIBRARY_DIR}/Data/src/Bulk.cpp - ${LIBRARY_DIR}/Data/src/Connector.cpp - ${LIBRARY_DIR}/Data/src/DataException.cpp - ${LIBRARY_DIR}/Data/src/Date.cpp - 
${LIBRARY_DIR}/Data/src/DynamicLOB.cpp - ${LIBRARY_DIR}/Data/src/Limit.cpp - ${LIBRARY_DIR}/Data/src/MetaColumn.cpp - ${LIBRARY_DIR}/Data/src/PooledSessionHolder.cpp - ${LIBRARY_DIR}/Data/src/PooledSessionImpl.cpp - ${LIBRARY_DIR}/Data/src/Position.cpp - ${LIBRARY_DIR}/Data/src/Range.cpp - ${LIBRARY_DIR}/Data/src/RecordSet.cpp - ${LIBRARY_DIR}/Data/src/Row.cpp - ${LIBRARY_DIR}/Data/src/RowFilter.cpp - ${LIBRARY_DIR}/Data/src/RowFormatter.cpp - ${LIBRARY_DIR}/Data/src/RowIterator.cpp - ${LIBRARY_DIR}/Data/src/Session.cpp - ${LIBRARY_DIR}/Data/src/SessionFactory.cpp - ${LIBRARY_DIR}/Data/src/SessionImpl.cpp - ${LIBRARY_DIR}/Data/src/SessionPool.cpp - ${LIBRARY_DIR}/Data/src/SessionPoolContainer.cpp - ${LIBRARY_DIR}/Data/src/SimpleRowFormatter.cpp - ${LIBRARY_DIR}/Data/src/SQLChannel.cpp - ${LIBRARY_DIR}/Data/src/Statement.cpp - ${LIBRARY_DIR}/Data/src/StatementCreator.cpp - ${LIBRARY_DIR}/Data/src/StatementImpl.cpp - ${LIBRARY_DIR}/Data/src/Time.cpp - ${LIBRARY_DIR}/Data/src/Transaction.cpp + "${LIBRARY_DIR}/Data/src/AbstractBinder.cpp" + "${LIBRARY_DIR}/Data/src/AbstractBinding.cpp" + "${LIBRARY_DIR}/Data/src/AbstractExtraction.cpp" + "${LIBRARY_DIR}/Data/src/AbstractExtractor.cpp" + "${LIBRARY_DIR}/Data/src/AbstractPreparation.cpp" + "${LIBRARY_DIR}/Data/src/AbstractPreparator.cpp" + "${LIBRARY_DIR}/Data/src/ArchiveStrategy.cpp" + "${LIBRARY_DIR}/Data/src/Bulk.cpp" + "${LIBRARY_DIR}/Data/src/Connector.cpp" + "${LIBRARY_DIR}/Data/src/DataException.cpp" + "${LIBRARY_DIR}/Data/src/Date.cpp" + "${LIBRARY_DIR}/Data/src/DynamicLOB.cpp" + "${LIBRARY_DIR}/Data/src/Limit.cpp" + "${LIBRARY_DIR}/Data/src/MetaColumn.cpp" + "${LIBRARY_DIR}/Data/src/PooledSessionHolder.cpp" + "${LIBRARY_DIR}/Data/src/PooledSessionImpl.cpp" + "${LIBRARY_DIR}/Data/src/Position.cpp" + "${LIBRARY_DIR}/Data/src/Range.cpp" + "${LIBRARY_DIR}/Data/src/RecordSet.cpp" + "${LIBRARY_DIR}/Data/src/Row.cpp" + "${LIBRARY_DIR}/Data/src/RowFilter.cpp" + "${LIBRARY_DIR}/Data/src/RowFormatter.cpp" + "${LIBRARY_DIR}/Data/src/RowIterator.cpp" + "${LIBRARY_DIR}/Data/src/Session.cpp" + "${LIBRARY_DIR}/Data/src/SessionFactory.cpp" + "${LIBRARY_DIR}/Data/src/SessionImpl.cpp" + "${LIBRARY_DIR}/Data/src/SessionPool.cpp" + "${LIBRARY_DIR}/Data/src/SessionPoolContainer.cpp" + "${LIBRARY_DIR}/Data/src/SimpleRowFormatter.cpp" + "${LIBRARY_DIR}/Data/src/SQLChannel.cpp" + "${LIBRARY_DIR}/Data/src/Statement.cpp" + "${LIBRARY_DIR}/Data/src/StatementCreator.cpp" + "${LIBRARY_DIR}/Data/src/StatementImpl.cpp" + "${LIBRARY_DIR}/Data/src/Time.cpp" + "${LIBRARY_DIR}/Data/src/Transaction.cpp" ) add_library (_poco_data ${SRCS}) @@ -43,7 +43,7 @@ if (USE_INTERNAL_POCO_LIBRARY) if (COMPILER_GCC) target_compile_options (_poco_data PRIVATE -Wno-deprecated-copy) endif () - target_include_directories (_poco_data SYSTEM PUBLIC ${LIBRARY_DIR}/Data/include) + target_include_directories (_poco_data SYSTEM PUBLIC "${LIBRARY_DIR}/Data/include") target_link_libraries (_poco_data PUBLIC Poco::Foundation) else () # NOTE: don't know why, but the GLOBAL is required here. 
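There is a likely answer to the `# NOTE: don't know why, but the GLOBAL is required here.` comment in the hunk above: an IMPORTED target in CMake is, by default, visible only in the directory where its `add_library` call appears (and below), so without `GLOBAL` the sibling `contrib/poco-cmake/*` directories could not link against it, and any name containing `::` passed to `target_link_libraries` must refer to an existing target. A minimal sketch of the pattern, assuming hypothetical `LIBRARY_POCO_DATA`/`INCLUDE_POCO_DATA` variables standing in for `find_library()`/`find_path()` results (not lines from this diff):

```
# Sketch only: the external-library fallback branch. GLOBAL promotes the
# IMPORTED target from directory scope to global scope, so targets defined
# in sibling directories can link against it by name.
add_library (Poco::Data UNKNOWN IMPORTED GLOBAL)
set_target_properties (Poco::Data PROPERTIES
    IMPORTED_LOCATION "${LIBRARY_POCO_DATA}"                # assumed find_library() result
    INTERFACE_INCLUDE_DIRECTORIES "${INCLUDE_POCO_DATA}")   # assumed find_path() result

# In a sibling directory, e.g. contrib/poco-cmake/Data/ODBC:
#   target_link_libraries (_poco_data_odbc PUBLIC Poco::Data unixodbc)
# Without GLOBAL, CMake would fail at generate time reporting that the
# target Poco::Data was not found, because IMPORTED targets are
# directory-scoped by default.
```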
diff --git a/contrib/poco-cmake/Data/ODBC/CMakeLists.txt b/contrib/poco-cmake/Data/ODBC/CMakeLists.txt index cd7c5ef2863..a3561304541 100644 --- a/contrib/poco-cmake/Data/ODBC/CMakeLists.txt +++ b/contrib/poco-cmake/Data/ODBC/CMakeLists.txt @@ -5,27 +5,27 @@ if (ENABLE_ODBC) if (USE_INTERNAL_POCO_LIBRARY) set (SRCS - ${LIBRARY_DIR}/Data/ODBC/src/Binder.cpp - ${LIBRARY_DIR}/Data/ODBC/src/ConnectionHandle.cpp - ${LIBRARY_DIR}/Data/ODBC/src/Connector.cpp - ${LIBRARY_DIR}/Data/ODBC/src/EnvironmentHandle.cpp - ${LIBRARY_DIR}/Data/ODBC/src/Extractor.cpp - ${LIBRARY_DIR}/Data/ODBC/src/ODBCException.cpp - ${LIBRARY_DIR}/Data/ODBC/src/ODBCMetaColumn.cpp - ${LIBRARY_DIR}/Data/ODBC/src/ODBCStatementImpl.cpp - ${LIBRARY_DIR}/Data/ODBC/src/Parameter.cpp - ${LIBRARY_DIR}/Data/ODBC/src/Preparator.cpp - ${LIBRARY_DIR}/Data/ODBC/src/SessionImpl.cpp - ${LIBRARY_DIR}/Data/ODBC/src/TypeInfo.cpp - ${LIBRARY_DIR}/Data/ODBC/src/Unicode.cpp - ${LIBRARY_DIR}/Data/ODBC/src/Utility.cpp + "${LIBRARY_DIR}/Data/ODBC/src/Binder.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/ConnectionHandle.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/Connector.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/EnvironmentHandle.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/Extractor.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/ODBCException.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/ODBCMetaColumn.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/ODBCStatementImpl.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/Parameter.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/Preparator.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/SessionImpl.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/TypeInfo.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/Unicode.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/Utility.cpp" ) add_library (_poco_data_odbc ${SRCS}) add_library (Poco::Data::ODBC ALIAS _poco_data_odbc) target_compile_options (_poco_data_odbc PRIVATE -Wno-unused-variable) - target_include_directories (_poco_data_odbc SYSTEM PUBLIC ${LIBRARY_DIR}/Data/ODBC/include) + target_include_directories (_poco_data_odbc SYSTEM PUBLIC "${LIBRARY_DIR}/Data/ODBC/include") target_link_libraries (_poco_data_odbc PUBLIC Poco::Data unixodbc) else () add_library (Poco::Data::ODBC UNKNOWN IMPORTED GLOBAL) diff --git a/contrib/poco-cmake/Foundation/CMakeLists.txt b/contrib/poco-cmake/Foundation/CMakeLists.txt index f4647461ec0..a9a4933873c 100644 --- a/contrib/poco-cmake/Foundation/CMakeLists.txt +++ b/contrib/poco-cmake/Foundation/CMakeLists.txt @@ -2,27 +2,27 @@ if (USE_INTERNAL_POCO_LIBRARY) # Foundation (pcre) set (SRCS_PCRE - ${LIBRARY_DIR}/Foundation/src/pcre_config.c - ${LIBRARY_DIR}/Foundation/src/pcre_byte_order.c - ${LIBRARY_DIR}/Foundation/src/pcre_chartables.c - ${LIBRARY_DIR}/Foundation/src/pcre_compile.c - ${LIBRARY_DIR}/Foundation/src/pcre_exec.c - ${LIBRARY_DIR}/Foundation/src/pcre_fullinfo.c - ${LIBRARY_DIR}/Foundation/src/pcre_globals.c - ${LIBRARY_DIR}/Foundation/src/pcre_maketables.c - ${LIBRARY_DIR}/Foundation/src/pcre_newline.c - ${LIBRARY_DIR}/Foundation/src/pcre_ord2utf8.c - ${LIBRARY_DIR}/Foundation/src/pcre_study.c - ${LIBRARY_DIR}/Foundation/src/pcre_tables.c - ${LIBRARY_DIR}/Foundation/src/pcre_dfa_exec.c - ${LIBRARY_DIR}/Foundation/src/pcre_get.c - ${LIBRARY_DIR}/Foundation/src/pcre_jit_compile.c - ${LIBRARY_DIR}/Foundation/src/pcre_refcount.c - ${LIBRARY_DIR}/Foundation/src/pcre_string_utils.c - ${LIBRARY_DIR}/Foundation/src/pcre_version.c - ${LIBRARY_DIR}/Foundation/src/pcre_ucd.c - ${LIBRARY_DIR}/Foundation/src/pcre_valid_utf8.c - ${LIBRARY_DIR}/Foundation/src/pcre_xclass.c + "${LIBRARY_DIR}/Foundation/src/pcre_config.c" + 
"${LIBRARY_DIR}/Foundation/src/pcre_byte_order.c" + "${LIBRARY_DIR}/Foundation/src/pcre_chartables.c" + "${LIBRARY_DIR}/Foundation/src/pcre_compile.c" + "${LIBRARY_DIR}/Foundation/src/pcre_exec.c" + "${LIBRARY_DIR}/Foundation/src/pcre_fullinfo.c" + "${LIBRARY_DIR}/Foundation/src/pcre_globals.c" + "${LIBRARY_DIR}/Foundation/src/pcre_maketables.c" + "${LIBRARY_DIR}/Foundation/src/pcre_newline.c" + "${LIBRARY_DIR}/Foundation/src/pcre_ord2utf8.c" + "${LIBRARY_DIR}/Foundation/src/pcre_study.c" + "${LIBRARY_DIR}/Foundation/src/pcre_tables.c" + "${LIBRARY_DIR}/Foundation/src/pcre_dfa_exec.c" + "${LIBRARY_DIR}/Foundation/src/pcre_get.c" + "${LIBRARY_DIR}/Foundation/src/pcre_jit_compile.c" + "${LIBRARY_DIR}/Foundation/src/pcre_refcount.c" + "${LIBRARY_DIR}/Foundation/src/pcre_string_utils.c" + "${LIBRARY_DIR}/Foundation/src/pcre_version.c" + "${LIBRARY_DIR}/Foundation/src/pcre_ucd.c" + "${LIBRARY_DIR}/Foundation/src/pcre_valid_utf8.c" + "${LIBRARY_DIR}/Foundation/src/pcre_xclass.c" ) add_library (_poco_foundation_pcre ${SRCS_PCRE}) @@ -33,159 +33,159 @@ if (USE_INTERNAL_POCO_LIBRARY) # Foundation set (SRCS - ${LIBRARY_DIR}/Foundation/src/AbstractObserver.cpp - ${LIBRARY_DIR}/Foundation/src/ActiveDispatcher.cpp - ${LIBRARY_DIR}/Foundation/src/ArchiveStrategy.cpp - ${LIBRARY_DIR}/Foundation/src/Ascii.cpp - ${LIBRARY_DIR}/Foundation/src/ASCIIEncoding.cpp - ${LIBRARY_DIR}/Foundation/src/AsyncChannel.cpp - ${LIBRARY_DIR}/Foundation/src/AtomicCounter.cpp - ${LIBRARY_DIR}/Foundation/src/Base32Decoder.cpp - ${LIBRARY_DIR}/Foundation/src/Base32Encoder.cpp - ${LIBRARY_DIR}/Foundation/src/Base64Decoder.cpp - ${LIBRARY_DIR}/Foundation/src/Base64Encoder.cpp - ${LIBRARY_DIR}/Foundation/src/BinaryReader.cpp - ${LIBRARY_DIR}/Foundation/src/BinaryWriter.cpp - ${LIBRARY_DIR}/Foundation/src/Bugcheck.cpp - ${LIBRARY_DIR}/Foundation/src/ByteOrder.cpp - ${LIBRARY_DIR}/Foundation/src/Channel.cpp - ${LIBRARY_DIR}/Foundation/src/Checksum.cpp - ${LIBRARY_DIR}/Foundation/src/Clock.cpp - ${LIBRARY_DIR}/Foundation/src/Condition.cpp - ${LIBRARY_DIR}/Foundation/src/Configurable.cpp - ${LIBRARY_DIR}/Foundation/src/ConsoleChannel.cpp - ${LIBRARY_DIR}/Foundation/src/CountingStream.cpp - ${LIBRARY_DIR}/Foundation/src/DateTime.cpp - ${LIBRARY_DIR}/Foundation/src/DateTimeFormat.cpp - ${LIBRARY_DIR}/Foundation/src/DateTimeFormatter.cpp - ${LIBRARY_DIR}/Foundation/src/DateTimeParser.cpp - ${LIBRARY_DIR}/Foundation/src/Debugger.cpp - ${LIBRARY_DIR}/Foundation/src/DeflatingStream.cpp - ${LIBRARY_DIR}/Foundation/src/DigestEngine.cpp - ${LIBRARY_DIR}/Foundation/src/DigestStream.cpp - ${LIBRARY_DIR}/Foundation/src/DirectoryIterator.cpp - ${LIBRARY_DIR}/Foundation/src/DirectoryIteratorStrategy.cpp - ${LIBRARY_DIR}/Foundation/src/DirectoryWatcher.cpp - ${LIBRARY_DIR}/Foundation/src/Environment.cpp - ${LIBRARY_DIR}/Foundation/src/Error.cpp - ${LIBRARY_DIR}/Foundation/src/ErrorHandler.cpp - ${LIBRARY_DIR}/Foundation/src/Event.cpp - ${LIBRARY_DIR}/Foundation/src/EventArgs.cpp - ${LIBRARY_DIR}/Foundation/src/EventChannel.cpp - ${LIBRARY_DIR}/Foundation/src/Exception.cpp - ${LIBRARY_DIR}/Foundation/src/FIFOBufferStream.cpp - ${LIBRARY_DIR}/Foundation/src/File.cpp - ${LIBRARY_DIR}/Foundation/src/FileChannel.cpp - ${LIBRARY_DIR}/Foundation/src/FileStream.cpp - ${LIBRARY_DIR}/Foundation/src/FileStreamFactory.cpp - ${LIBRARY_DIR}/Foundation/src/Format.cpp - ${LIBRARY_DIR}/Foundation/src/Formatter.cpp - ${LIBRARY_DIR}/Foundation/src/FormattingChannel.cpp - ${LIBRARY_DIR}/Foundation/src/FPEnvironment.cpp - ${LIBRARY_DIR}/Foundation/src/Glob.cpp - 
${LIBRARY_DIR}/Foundation/src/Hash.cpp - ${LIBRARY_DIR}/Foundation/src/HashStatistic.cpp - ${LIBRARY_DIR}/Foundation/src/HexBinaryDecoder.cpp - ${LIBRARY_DIR}/Foundation/src/HexBinaryEncoder.cpp - ${LIBRARY_DIR}/Foundation/src/InflatingStream.cpp - ${LIBRARY_DIR}/Foundation/src/JSONString.cpp - ${LIBRARY_DIR}/Foundation/src/Latin1Encoding.cpp - ${LIBRARY_DIR}/Foundation/src/Latin2Encoding.cpp - ${LIBRARY_DIR}/Foundation/src/Latin9Encoding.cpp - ${LIBRARY_DIR}/Foundation/src/LineEndingConverter.cpp - ${LIBRARY_DIR}/Foundation/src/LocalDateTime.cpp - ${LIBRARY_DIR}/Foundation/src/LogFile.cpp - ${LIBRARY_DIR}/Foundation/src/Logger.cpp - ${LIBRARY_DIR}/Foundation/src/LoggingFactory.cpp - ${LIBRARY_DIR}/Foundation/src/LoggingRegistry.cpp - ${LIBRARY_DIR}/Foundation/src/LogStream.cpp - ${LIBRARY_DIR}/Foundation/src/Manifest.cpp - ${LIBRARY_DIR}/Foundation/src/MD4Engine.cpp - ${LIBRARY_DIR}/Foundation/src/MD5Engine.cpp - ${LIBRARY_DIR}/Foundation/src/MemoryPool.cpp - ${LIBRARY_DIR}/Foundation/src/MemoryStream.cpp - ${LIBRARY_DIR}/Foundation/src/Message.cpp - ${LIBRARY_DIR}/Foundation/src/Mutex.cpp - ${LIBRARY_DIR}/Foundation/src/NamedEvent.cpp - ${LIBRARY_DIR}/Foundation/src/NamedMutex.cpp - ${LIBRARY_DIR}/Foundation/src/NestedDiagnosticContext.cpp - ${LIBRARY_DIR}/Foundation/src/Notification.cpp - ${LIBRARY_DIR}/Foundation/src/NotificationCenter.cpp - ${LIBRARY_DIR}/Foundation/src/NotificationQueue.cpp - ${LIBRARY_DIR}/Foundation/src/NullChannel.cpp - ${LIBRARY_DIR}/Foundation/src/NullStream.cpp - ${LIBRARY_DIR}/Foundation/src/NumberFormatter.cpp - ${LIBRARY_DIR}/Foundation/src/NumberParser.cpp - ${LIBRARY_DIR}/Foundation/src/NumericString.cpp - ${LIBRARY_DIR}/Foundation/src/Path.cpp - ${LIBRARY_DIR}/Foundation/src/PatternFormatter.cpp - ${LIBRARY_DIR}/Foundation/src/Pipe.cpp - ${LIBRARY_DIR}/Foundation/src/PipeImpl.cpp - ${LIBRARY_DIR}/Foundation/src/PipeStream.cpp - ${LIBRARY_DIR}/Foundation/src/PriorityNotificationQueue.cpp - ${LIBRARY_DIR}/Foundation/src/Process.cpp - ${LIBRARY_DIR}/Foundation/src/PurgeStrategy.cpp - ${LIBRARY_DIR}/Foundation/src/Random.cpp - ${LIBRARY_DIR}/Foundation/src/RandomStream.cpp - ${LIBRARY_DIR}/Foundation/src/RefCountedObject.cpp - ${LIBRARY_DIR}/Foundation/src/RegularExpression.cpp - ${LIBRARY_DIR}/Foundation/src/RotateStrategy.cpp - ${LIBRARY_DIR}/Foundation/src/Runnable.cpp - ${LIBRARY_DIR}/Foundation/src/RWLock.cpp - ${LIBRARY_DIR}/Foundation/src/Semaphore.cpp - ${LIBRARY_DIR}/Foundation/src/SHA1Engine.cpp - ${LIBRARY_DIR}/Foundation/src/SharedLibrary.cpp - ${LIBRARY_DIR}/Foundation/src/SharedMemory.cpp - ${LIBRARY_DIR}/Foundation/src/SignalHandler.cpp - ${LIBRARY_DIR}/Foundation/src/SimpleFileChannel.cpp - ${LIBRARY_DIR}/Foundation/src/SortedDirectoryIterator.cpp - ${LIBRARY_DIR}/Foundation/src/SplitterChannel.cpp - ${LIBRARY_DIR}/Foundation/src/Stopwatch.cpp - ${LIBRARY_DIR}/Foundation/src/StreamChannel.cpp - ${LIBRARY_DIR}/Foundation/src/StreamConverter.cpp - ${LIBRARY_DIR}/Foundation/src/StreamCopier.cpp - ${LIBRARY_DIR}/Foundation/src/StreamTokenizer.cpp - ${LIBRARY_DIR}/Foundation/src/String.cpp - ${LIBRARY_DIR}/Foundation/src/StringTokenizer.cpp - ${LIBRARY_DIR}/Foundation/src/SynchronizedObject.cpp - ${LIBRARY_DIR}/Foundation/src/SyslogChannel.cpp - ${LIBRARY_DIR}/Foundation/src/Task.cpp - ${LIBRARY_DIR}/Foundation/src/TaskManager.cpp - ${LIBRARY_DIR}/Foundation/src/TaskNotification.cpp - ${LIBRARY_DIR}/Foundation/src/TeeStream.cpp - ${LIBRARY_DIR}/Foundation/src/TemporaryFile.cpp - ${LIBRARY_DIR}/Foundation/src/TextBufferIterator.cpp - 
${LIBRARY_DIR}/Foundation/src/TextConverter.cpp - ${LIBRARY_DIR}/Foundation/src/TextEncoding.cpp - ${LIBRARY_DIR}/Foundation/src/TextIterator.cpp - ${LIBRARY_DIR}/Foundation/src/Thread.cpp - ${LIBRARY_DIR}/Foundation/src/ThreadLocal.cpp - ${LIBRARY_DIR}/Foundation/src/ThreadPool.cpp - ${LIBRARY_DIR}/Foundation/src/ThreadTarget.cpp - ${LIBRARY_DIR}/Foundation/src/TimedNotificationQueue.cpp - ${LIBRARY_DIR}/Foundation/src/Timer.cpp - ${LIBRARY_DIR}/Foundation/src/Timespan.cpp - ${LIBRARY_DIR}/Foundation/src/Timestamp.cpp - ${LIBRARY_DIR}/Foundation/src/Timezone.cpp - ${LIBRARY_DIR}/Foundation/src/Token.cpp - ${LIBRARY_DIR}/Foundation/src/Unicode.cpp - ${LIBRARY_DIR}/Foundation/src/UnicodeConverter.cpp - ${LIBRARY_DIR}/Foundation/src/URI.cpp - ${LIBRARY_DIR}/Foundation/src/URIStreamFactory.cpp - ${LIBRARY_DIR}/Foundation/src/URIStreamOpener.cpp - ${LIBRARY_DIR}/Foundation/src/UTF16Encoding.cpp - ${LIBRARY_DIR}/Foundation/src/UTF32Encoding.cpp - ${LIBRARY_DIR}/Foundation/src/UTF8Encoding.cpp - ${LIBRARY_DIR}/Foundation/src/UTF8String.cpp - ${LIBRARY_DIR}/Foundation/src/UUID.cpp - ${LIBRARY_DIR}/Foundation/src/UUIDGenerator.cpp - ${LIBRARY_DIR}/Foundation/src/Var.cpp - ${LIBRARY_DIR}/Foundation/src/VarHolder.cpp - ${LIBRARY_DIR}/Foundation/src/VarIterator.cpp - ${LIBRARY_DIR}/Foundation/src/Void.cpp - ${LIBRARY_DIR}/Foundation/src/Windows1250Encoding.cpp - ${LIBRARY_DIR}/Foundation/src/Windows1251Encoding.cpp - ${LIBRARY_DIR}/Foundation/src/Windows1252Encoding.cpp + "${LIBRARY_DIR}/Foundation/src/AbstractObserver.cpp" + "${LIBRARY_DIR}/Foundation/src/ActiveDispatcher.cpp" + "${LIBRARY_DIR}/Foundation/src/ArchiveStrategy.cpp" + "${LIBRARY_DIR}/Foundation/src/Ascii.cpp" + "${LIBRARY_DIR}/Foundation/src/ASCIIEncoding.cpp" + "${LIBRARY_DIR}/Foundation/src/AsyncChannel.cpp" + "${LIBRARY_DIR}/Foundation/src/AtomicCounter.cpp" + "${LIBRARY_DIR}/Foundation/src/Base32Decoder.cpp" + "${LIBRARY_DIR}/Foundation/src/Base32Encoder.cpp" + "${LIBRARY_DIR}/Foundation/src/Base64Decoder.cpp" + "${LIBRARY_DIR}/Foundation/src/Base64Encoder.cpp" + "${LIBRARY_DIR}/Foundation/src/BinaryReader.cpp" + "${LIBRARY_DIR}/Foundation/src/BinaryWriter.cpp" + "${LIBRARY_DIR}/Foundation/src/Bugcheck.cpp" + "${LIBRARY_DIR}/Foundation/src/ByteOrder.cpp" + "${LIBRARY_DIR}/Foundation/src/Channel.cpp" + "${LIBRARY_DIR}/Foundation/src/Checksum.cpp" + "${LIBRARY_DIR}/Foundation/src/Clock.cpp" + "${LIBRARY_DIR}/Foundation/src/Condition.cpp" + "${LIBRARY_DIR}/Foundation/src/Configurable.cpp" + "${LIBRARY_DIR}/Foundation/src/ConsoleChannel.cpp" + "${LIBRARY_DIR}/Foundation/src/CountingStream.cpp" + "${LIBRARY_DIR}/Foundation/src/DateTime.cpp" + "${LIBRARY_DIR}/Foundation/src/DateTimeFormat.cpp" + "${LIBRARY_DIR}/Foundation/src/DateTimeFormatter.cpp" + "${LIBRARY_DIR}/Foundation/src/DateTimeParser.cpp" + "${LIBRARY_DIR}/Foundation/src/Debugger.cpp" + "${LIBRARY_DIR}/Foundation/src/DeflatingStream.cpp" + "${LIBRARY_DIR}/Foundation/src/DigestEngine.cpp" + "${LIBRARY_DIR}/Foundation/src/DigestStream.cpp" + "${LIBRARY_DIR}/Foundation/src/DirectoryIterator.cpp" + "${LIBRARY_DIR}/Foundation/src/DirectoryIteratorStrategy.cpp" + "${LIBRARY_DIR}/Foundation/src/DirectoryWatcher.cpp" + "${LIBRARY_DIR}/Foundation/src/Environment.cpp" + "${LIBRARY_DIR}/Foundation/src/Error.cpp" + "${LIBRARY_DIR}/Foundation/src/ErrorHandler.cpp" + "${LIBRARY_DIR}/Foundation/src/Event.cpp" + "${LIBRARY_DIR}/Foundation/src/EventArgs.cpp" + "${LIBRARY_DIR}/Foundation/src/EventChannel.cpp" + "${LIBRARY_DIR}/Foundation/src/Exception.cpp" + 
"${LIBRARY_DIR}/Foundation/src/FIFOBufferStream.cpp" + "${LIBRARY_DIR}/Foundation/src/File.cpp" + "${LIBRARY_DIR}/Foundation/src/FileChannel.cpp" + "${LIBRARY_DIR}/Foundation/src/FileStream.cpp" + "${LIBRARY_DIR}/Foundation/src/FileStreamFactory.cpp" + "${LIBRARY_DIR}/Foundation/src/Format.cpp" + "${LIBRARY_DIR}/Foundation/src/Formatter.cpp" + "${LIBRARY_DIR}/Foundation/src/FormattingChannel.cpp" + "${LIBRARY_DIR}/Foundation/src/FPEnvironment.cpp" + "${LIBRARY_DIR}/Foundation/src/Glob.cpp" + "${LIBRARY_DIR}/Foundation/src/Hash.cpp" + "${LIBRARY_DIR}/Foundation/src/HashStatistic.cpp" + "${LIBRARY_DIR}/Foundation/src/HexBinaryDecoder.cpp" + "${LIBRARY_DIR}/Foundation/src/HexBinaryEncoder.cpp" + "${LIBRARY_DIR}/Foundation/src/InflatingStream.cpp" + "${LIBRARY_DIR}/Foundation/src/JSONString.cpp" + "${LIBRARY_DIR}/Foundation/src/Latin1Encoding.cpp" + "${LIBRARY_DIR}/Foundation/src/Latin2Encoding.cpp" + "${LIBRARY_DIR}/Foundation/src/Latin9Encoding.cpp" + "${LIBRARY_DIR}/Foundation/src/LineEndingConverter.cpp" + "${LIBRARY_DIR}/Foundation/src/LocalDateTime.cpp" + "${LIBRARY_DIR}/Foundation/src/LogFile.cpp" + "${LIBRARY_DIR}/Foundation/src/Logger.cpp" + "${LIBRARY_DIR}/Foundation/src/LoggingFactory.cpp" + "${LIBRARY_DIR}/Foundation/src/LoggingRegistry.cpp" + "${LIBRARY_DIR}/Foundation/src/LogStream.cpp" + "${LIBRARY_DIR}/Foundation/src/Manifest.cpp" + "${LIBRARY_DIR}/Foundation/src/MD4Engine.cpp" + "${LIBRARY_DIR}/Foundation/src/MD5Engine.cpp" + "${LIBRARY_DIR}/Foundation/src/MemoryPool.cpp" + "${LIBRARY_DIR}/Foundation/src/MemoryStream.cpp" + "${LIBRARY_DIR}/Foundation/src/Message.cpp" + "${LIBRARY_DIR}/Foundation/src/Mutex.cpp" + "${LIBRARY_DIR}/Foundation/src/NamedEvent.cpp" + "${LIBRARY_DIR}/Foundation/src/NamedMutex.cpp" + "${LIBRARY_DIR}/Foundation/src/NestedDiagnosticContext.cpp" + "${LIBRARY_DIR}/Foundation/src/Notification.cpp" + "${LIBRARY_DIR}/Foundation/src/NotificationCenter.cpp" + "${LIBRARY_DIR}/Foundation/src/NotificationQueue.cpp" + "${LIBRARY_DIR}/Foundation/src/NullChannel.cpp" + "${LIBRARY_DIR}/Foundation/src/NullStream.cpp" + "${LIBRARY_DIR}/Foundation/src/NumberFormatter.cpp" + "${LIBRARY_DIR}/Foundation/src/NumberParser.cpp" + "${LIBRARY_DIR}/Foundation/src/NumericString.cpp" + "${LIBRARY_DIR}/Foundation/src/Path.cpp" + "${LIBRARY_DIR}/Foundation/src/PatternFormatter.cpp" + "${LIBRARY_DIR}/Foundation/src/Pipe.cpp" + "${LIBRARY_DIR}/Foundation/src/PipeImpl.cpp" + "${LIBRARY_DIR}/Foundation/src/PipeStream.cpp" + "${LIBRARY_DIR}/Foundation/src/PriorityNotificationQueue.cpp" + "${LIBRARY_DIR}/Foundation/src/Process.cpp" + "${LIBRARY_DIR}/Foundation/src/PurgeStrategy.cpp" + "${LIBRARY_DIR}/Foundation/src/Random.cpp" + "${LIBRARY_DIR}/Foundation/src/RandomStream.cpp" + "${LIBRARY_DIR}/Foundation/src/RefCountedObject.cpp" + "${LIBRARY_DIR}/Foundation/src/RegularExpression.cpp" + "${LIBRARY_DIR}/Foundation/src/RotateStrategy.cpp" + "${LIBRARY_DIR}/Foundation/src/Runnable.cpp" + "${LIBRARY_DIR}/Foundation/src/RWLock.cpp" + "${LIBRARY_DIR}/Foundation/src/Semaphore.cpp" + "${LIBRARY_DIR}/Foundation/src/SHA1Engine.cpp" + "${LIBRARY_DIR}/Foundation/src/SharedLibrary.cpp" + "${LIBRARY_DIR}/Foundation/src/SharedMemory.cpp" + "${LIBRARY_DIR}/Foundation/src/SignalHandler.cpp" + "${LIBRARY_DIR}/Foundation/src/SimpleFileChannel.cpp" + "${LIBRARY_DIR}/Foundation/src/SortedDirectoryIterator.cpp" + "${LIBRARY_DIR}/Foundation/src/SplitterChannel.cpp" + "${LIBRARY_DIR}/Foundation/src/Stopwatch.cpp" + "${LIBRARY_DIR}/Foundation/src/StreamChannel.cpp" + 
"${LIBRARY_DIR}/Foundation/src/StreamConverter.cpp" + "${LIBRARY_DIR}/Foundation/src/StreamCopier.cpp" + "${LIBRARY_DIR}/Foundation/src/StreamTokenizer.cpp" + "${LIBRARY_DIR}/Foundation/src/String.cpp" + "${LIBRARY_DIR}/Foundation/src/StringTokenizer.cpp" + "${LIBRARY_DIR}/Foundation/src/SynchronizedObject.cpp" + "${LIBRARY_DIR}/Foundation/src/SyslogChannel.cpp" + "${LIBRARY_DIR}/Foundation/src/Task.cpp" + "${LIBRARY_DIR}/Foundation/src/TaskManager.cpp" + "${LIBRARY_DIR}/Foundation/src/TaskNotification.cpp" + "${LIBRARY_DIR}/Foundation/src/TeeStream.cpp" + "${LIBRARY_DIR}/Foundation/src/TemporaryFile.cpp" + "${LIBRARY_DIR}/Foundation/src/TextBufferIterator.cpp" + "${LIBRARY_DIR}/Foundation/src/TextConverter.cpp" + "${LIBRARY_DIR}/Foundation/src/TextEncoding.cpp" + "${LIBRARY_DIR}/Foundation/src/TextIterator.cpp" + "${LIBRARY_DIR}/Foundation/src/Thread.cpp" + "${LIBRARY_DIR}/Foundation/src/ThreadLocal.cpp" + "${LIBRARY_DIR}/Foundation/src/ThreadPool.cpp" + "${LIBRARY_DIR}/Foundation/src/ThreadTarget.cpp" + "${LIBRARY_DIR}/Foundation/src/TimedNotificationQueue.cpp" + "${LIBRARY_DIR}/Foundation/src/Timer.cpp" + "${LIBRARY_DIR}/Foundation/src/Timespan.cpp" + "${LIBRARY_DIR}/Foundation/src/Timestamp.cpp" + "${LIBRARY_DIR}/Foundation/src/Timezone.cpp" + "${LIBRARY_DIR}/Foundation/src/Token.cpp" + "${LIBRARY_DIR}/Foundation/src/Unicode.cpp" + "${LIBRARY_DIR}/Foundation/src/UnicodeConverter.cpp" + "${LIBRARY_DIR}/Foundation/src/URI.cpp" + "${LIBRARY_DIR}/Foundation/src/URIStreamFactory.cpp" + "${LIBRARY_DIR}/Foundation/src/URIStreamOpener.cpp" + "${LIBRARY_DIR}/Foundation/src/UTF16Encoding.cpp" + "${LIBRARY_DIR}/Foundation/src/UTF32Encoding.cpp" + "${LIBRARY_DIR}/Foundation/src/UTF8Encoding.cpp" + "${LIBRARY_DIR}/Foundation/src/UTF8String.cpp" + "${LIBRARY_DIR}/Foundation/src/UUID.cpp" + "${LIBRARY_DIR}/Foundation/src/UUIDGenerator.cpp" + "${LIBRARY_DIR}/Foundation/src/Var.cpp" + "${LIBRARY_DIR}/Foundation/src/VarHolder.cpp" + "${LIBRARY_DIR}/Foundation/src/VarIterator.cpp" + "${LIBRARY_DIR}/Foundation/src/Void.cpp" + "${LIBRARY_DIR}/Foundation/src/Windows1250Encoding.cpp" + "${LIBRARY_DIR}/Foundation/src/Windows1251Encoding.cpp" + "${LIBRARY_DIR}/Foundation/src/Windows1252Encoding.cpp" ) add_library (_poco_foundation ${SRCS}) @@ -221,7 +221,7 @@ if (USE_INTERNAL_POCO_LIBRARY) POCO_ENABLE_CPP11 POCO_OS_FAMILY_UNIX ) - target_include_directories (_poco_foundation SYSTEM PUBLIC ${LIBRARY_DIR}/Foundation/include) + target_include_directories (_poco_foundation SYSTEM PUBLIC "${LIBRARY_DIR}/Foundation/include") target_link_libraries (_poco_foundation PRIVATE Poco::Foundation::PCRE ${ZLIB_LIBRARIES}) else () add_library (Poco::Foundation UNKNOWN IMPORTED GLOBAL) @@ -233,3 +233,10 @@ else () message (STATUS "Using Poco::Foundation: ${LIBRARY_POCO_FOUNDATION} ${INCLUDE_POCO_FOUNDATION}") endif () + +if(OS_DARWIN AND ARCH_AARCH64) + target_compile_definitions (_poco_foundation + PRIVATE + POCO_NO_STAT64 + ) +endif() diff --git a/contrib/poco-cmake/JSON/CMakeLists.txt b/contrib/poco-cmake/JSON/CMakeLists.txt index 89054cf225d..7033b800d5d 100644 --- a/contrib/poco-cmake/JSON/CMakeLists.txt +++ b/contrib/poco-cmake/JSON/CMakeLists.txt @@ -2,7 +2,7 @@ if (USE_INTERNAL_POCO_LIBRARY) # Poco::JSON (pdjson) set (SRCS_PDJSON - ${LIBRARY_DIR}/JSON/src/pdjson.c + "${LIBRARY_DIR}/JSON/src/pdjson.c" ) add_library (_poco_json_pdjson ${SRCS_PDJSON}) @@ -11,24 +11,24 @@ if (USE_INTERNAL_POCO_LIBRARY) # Poco::JSON set (SRCS - ${LIBRARY_DIR}/JSON/src/Array.cpp - ${LIBRARY_DIR}/JSON/src/Handler.cpp - 
${LIBRARY_DIR}/JSON/src/JSONException.cpp - ${LIBRARY_DIR}/JSON/src/Object.cpp - ${LIBRARY_DIR}/JSON/src/ParseHandler.cpp - ${LIBRARY_DIR}/JSON/src/Parser.cpp - ${LIBRARY_DIR}/JSON/src/ParserImpl.cpp - ${LIBRARY_DIR}/JSON/src/PrintHandler.cpp - ${LIBRARY_DIR}/JSON/src/Query.cpp - ${LIBRARY_DIR}/JSON/src/Stringifier.cpp - ${LIBRARY_DIR}/JSON/src/Template.cpp - ${LIBRARY_DIR}/JSON/src/TemplateCache.cpp + "${LIBRARY_DIR}/JSON/src/Array.cpp" + "${LIBRARY_DIR}/JSON/src/Handler.cpp" + "${LIBRARY_DIR}/JSON/src/JSONException.cpp" + "${LIBRARY_DIR}/JSON/src/Object.cpp" + "${LIBRARY_DIR}/JSON/src/ParseHandler.cpp" + "${LIBRARY_DIR}/JSON/src/Parser.cpp" + "${LIBRARY_DIR}/JSON/src/ParserImpl.cpp" + "${LIBRARY_DIR}/JSON/src/PrintHandler.cpp" + "${LIBRARY_DIR}/JSON/src/Query.cpp" + "${LIBRARY_DIR}/JSON/src/Stringifier.cpp" + "${LIBRARY_DIR}/JSON/src/Template.cpp" + "${LIBRARY_DIR}/JSON/src/TemplateCache.cpp" ) add_library (_poco_json ${SRCS}) add_library (Poco::JSON ALIAS _poco_json) - target_include_directories (_poco_json SYSTEM PUBLIC ${LIBRARY_DIR}/JSON/include) + target_include_directories (_poco_json SYSTEM PUBLIC "${LIBRARY_DIR}/JSON/include") target_link_libraries (_poco_json PUBLIC Poco::Foundation Poco::JSON::Pdjson) else () add_library (Poco::JSON UNKNOWN IMPORTED GLOBAL) diff --git a/contrib/poco-cmake/MongoDB/CMakeLists.txt b/contrib/poco-cmake/MongoDB/CMakeLists.txt index 0d79f680a64..e3dce7ac5cd 100644 --- a/contrib/poco-cmake/MongoDB/CMakeLists.txt +++ b/contrib/poco-cmake/MongoDB/CMakeLists.txt @@ -1,32 +1,32 @@ if (USE_INTERNAL_POCO_LIBRARY) set (SRCS - ${LIBRARY_DIR}/MongoDB/src/Array.cpp - ${LIBRARY_DIR}/MongoDB/src/Binary.cpp - ${LIBRARY_DIR}/MongoDB/src/Connection.cpp - ${LIBRARY_DIR}/MongoDB/src/Cursor.cpp - ${LIBRARY_DIR}/MongoDB/src/Database.cpp - ${LIBRARY_DIR}/MongoDB/src/DeleteRequest.cpp - ${LIBRARY_DIR}/MongoDB/src/Document.cpp - ${LIBRARY_DIR}/MongoDB/src/Element.cpp - ${LIBRARY_DIR}/MongoDB/src/GetMoreRequest.cpp - ${LIBRARY_DIR}/MongoDB/src/InsertRequest.cpp - ${LIBRARY_DIR}/MongoDB/src/JavaScriptCode.cpp - ${LIBRARY_DIR}/MongoDB/src/KillCursorsRequest.cpp - ${LIBRARY_DIR}/MongoDB/src/Message.cpp - ${LIBRARY_DIR}/MongoDB/src/MessageHeader.cpp - ${LIBRARY_DIR}/MongoDB/src/ObjectId.cpp - ${LIBRARY_DIR}/MongoDB/src/QueryRequest.cpp - ${LIBRARY_DIR}/MongoDB/src/RegularExpression.cpp - ${LIBRARY_DIR}/MongoDB/src/ReplicaSet.cpp - ${LIBRARY_DIR}/MongoDB/src/RequestMessage.cpp - ${LIBRARY_DIR}/MongoDB/src/ResponseMessage.cpp - ${LIBRARY_DIR}/MongoDB/src/UpdateRequest.cpp + "${LIBRARY_DIR}/MongoDB/src/Array.cpp" + "${LIBRARY_DIR}/MongoDB/src/Binary.cpp" + "${LIBRARY_DIR}/MongoDB/src/Connection.cpp" + "${LIBRARY_DIR}/MongoDB/src/Cursor.cpp" + "${LIBRARY_DIR}/MongoDB/src/Database.cpp" + "${LIBRARY_DIR}/MongoDB/src/DeleteRequest.cpp" + "${LIBRARY_DIR}/MongoDB/src/Document.cpp" + "${LIBRARY_DIR}/MongoDB/src/Element.cpp" + "${LIBRARY_DIR}/MongoDB/src/GetMoreRequest.cpp" + "${LIBRARY_DIR}/MongoDB/src/InsertRequest.cpp" + "${LIBRARY_DIR}/MongoDB/src/JavaScriptCode.cpp" + "${LIBRARY_DIR}/MongoDB/src/KillCursorsRequest.cpp" + "${LIBRARY_DIR}/MongoDB/src/Message.cpp" + "${LIBRARY_DIR}/MongoDB/src/MessageHeader.cpp" + "${LIBRARY_DIR}/MongoDB/src/ObjectId.cpp" + "${LIBRARY_DIR}/MongoDB/src/QueryRequest.cpp" + "${LIBRARY_DIR}/MongoDB/src/RegularExpression.cpp" + "${LIBRARY_DIR}/MongoDB/src/ReplicaSet.cpp" + "${LIBRARY_DIR}/MongoDB/src/RequestMessage.cpp" + "${LIBRARY_DIR}/MongoDB/src/ResponseMessage.cpp" + "${LIBRARY_DIR}/MongoDB/src/UpdateRequest.cpp" ) add_library (_poco_mongodb ${SRCS}) 
add_library (Poco::MongoDB ALIAS _poco_mongodb) - target_include_directories (_poco_mongodb SYSTEM PUBLIC ${LIBRARY_DIR}/MongoDB/include) + target_include_directories (_poco_mongodb SYSTEM PUBLIC "${LIBRARY_DIR}/MongoDB/include") target_link_libraries (_poco_mongodb PUBLIC Poco::Net) else () add_library (Poco::MongoDB UNKNOWN IMPORTED GLOBAL) diff --git a/contrib/poco-cmake/Net/CMakeLists.txt b/contrib/poco-cmake/Net/CMakeLists.txt index 9bc06e52e05..45989af8d45 100644 --- a/contrib/poco-cmake/Net/CMakeLists.txt +++ b/contrib/poco-cmake/Net/CMakeLists.txt @@ -1,105 +1,105 @@ if (USE_INTERNAL_POCO_LIBRARY) set (SRCS - ${LIBRARY_DIR}/Net/src/AbstractHTTPRequestHandler.cpp - ${LIBRARY_DIR}/Net/src/DatagramSocket.cpp - ${LIBRARY_DIR}/Net/src/DatagramSocketImpl.cpp - ${LIBRARY_DIR}/Net/src/DialogSocket.cpp - ${LIBRARY_DIR}/Net/src/DNS.cpp - ${LIBRARY_DIR}/Net/src/FilePartSource.cpp - ${LIBRARY_DIR}/Net/src/FTPClientSession.cpp - ${LIBRARY_DIR}/Net/src/FTPStreamFactory.cpp - ${LIBRARY_DIR}/Net/src/HostEntry.cpp - ${LIBRARY_DIR}/Net/src/HTMLForm.cpp - ${LIBRARY_DIR}/Net/src/HTTPAuthenticationParams.cpp - ${LIBRARY_DIR}/Net/src/HTTPBasicCredentials.cpp - ${LIBRARY_DIR}/Net/src/HTTPBufferAllocator.cpp - ${LIBRARY_DIR}/Net/src/HTTPChunkedStream.cpp - ${LIBRARY_DIR}/Net/src/HTTPClientSession.cpp - ${LIBRARY_DIR}/Net/src/HTTPCookie.cpp - ${LIBRARY_DIR}/Net/src/HTTPCredentials.cpp - ${LIBRARY_DIR}/Net/src/HTTPDigestCredentials.cpp - ${LIBRARY_DIR}/Net/src/HTTPFixedLengthStream.cpp - ${LIBRARY_DIR}/Net/src/HTTPHeaderStream.cpp - ${LIBRARY_DIR}/Net/src/HTTPIOStream.cpp - ${LIBRARY_DIR}/Net/src/HTTPMessage.cpp - ${LIBRARY_DIR}/Net/src/HTTPRequest.cpp - ${LIBRARY_DIR}/Net/src/HTTPRequestHandler.cpp - ${LIBRARY_DIR}/Net/src/HTTPRequestHandlerFactory.cpp - ${LIBRARY_DIR}/Net/src/HTTPResponse.cpp - ${LIBRARY_DIR}/Net/src/HTTPServer.cpp - ${LIBRARY_DIR}/Net/src/HTTPServerConnection.cpp - ${LIBRARY_DIR}/Net/src/HTTPServerConnectionFactory.cpp - ${LIBRARY_DIR}/Net/src/HTTPServerParams.cpp - ${LIBRARY_DIR}/Net/src/HTTPServerRequest.cpp - ${LIBRARY_DIR}/Net/src/HTTPServerRequestImpl.cpp - ${LIBRARY_DIR}/Net/src/HTTPServerResponse.cpp - ${LIBRARY_DIR}/Net/src/HTTPServerResponseImpl.cpp - ${LIBRARY_DIR}/Net/src/HTTPServerSession.cpp - ${LIBRARY_DIR}/Net/src/HTTPSession.cpp - ${LIBRARY_DIR}/Net/src/HTTPSessionFactory.cpp - ${LIBRARY_DIR}/Net/src/HTTPSessionInstantiator.cpp - ${LIBRARY_DIR}/Net/src/HTTPStream.cpp - ${LIBRARY_DIR}/Net/src/HTTPStreamFactory.cpp - ${LIBRARY_DIR}/Net/src/ICMPClient.cpp - ${LIBRARY_DIR}/Net/src/ICMPEventArgs.cpp - ${LIBRARY_DIR}/Net/src/ICMPPacket.cpp - ${LIBRARY_DIR}/Net/src/ICMPPacketImpl.cpp - ${LIBRARY_DIR}/Net/src/ICMPSocket.cpp - ${LIBRARY_DIR}/Net/src/ICMPSocketImpl.cpp - ${LIBRARY_DIR}/Net/src/ICMPv4PacketImpl.cpp - ${LIBRARY_DIR}/Net/src/IPAddress.cpp - ${LIBRARY_DIR}/Net/src/IPAddressImpl.cpp - ${LIBRARY_DIR}/Net/src/MailMessage.cpp - ${LIBRARY_DIR}/Net/src/MailRecipient.cpp - ${LIBRARY_DIR}/Net/src/MailStream.cpp - ${LIBRARY_DIR}/Net/src/MediaType.cpp - ${LIBRARY_DIR}/Net/src/MessageHeader.cpp - ${LIBRARY_DIR}/Net/src/MulticastSocket.cpp - ${LIBRARY_DIR}/Net/src/MultipartReader.cpp - ${LIBRARY_DIR}/Net/src/MultipartWriter.cpp - ${LIBRARY_DIR}/Net/src/NameValueCollection.cpp - ${LIBRARY_DIR}/Net/src/Net.cpp - ${LIBRARY_DIR}/Net/src/NetException.cpp - ${LIBRARY_DIR}/Net/src/NetworkInterface.cpp - ${LIBRARY_DIR}/Net/src/NTPClient.cpp - ${LIBRARY_DIR}/Net/src/NTPEventArgs.cpp - ${LIBRARY_DIR}/Net/src/NTPPacket.cpp - ${LIBRARY_DIR}/Net/src/NullPartHandler.cpp - 
${LIBRARY_DIR}/Net/src/OAuth10Credentials.cpp - ${LIBRARY_DIR}/Net/src/OAuth20Credentials.cpp - ${LIBRARY_DIR}/Net/src/PartHandler.cpp - ${LIBRARY_DIR}/Net/src/PartSource.cpp - ${LIBRARY_DIR}/Net/src/PartStore.cpp - ${LIBRARY_DIR}/Net/src/PollSet.cpp - ${LIBRARY_DIR}/Net/src/POP3ClientSession.cpp - ${LIBRARY_DIR}/Net/src/QuotedPrintableDecoder.cpp - ${LIBRARY_DIR}/Net/src/QuotedPrintableEncoder.cpp - ${LIBRARY_DIR}/Net/src/RawSocket.cpp - ${LIBRARY_DIR}/Net/src/RawSocketImpl.cpp - ${LIBRARY_DIR}/Net/src/RemoteSyslogChannel.cpp - ${LIBRARY_DIR}/Net/src/RemoteSyslogListener.cpp - ${LIBRARY_DIR}/Net/src/ServerSocket.cpp - ${LIBRARY_DIR}/Net/src/ServerSocketImpl.cpp - ${LIBRARY_DIR}/Net/src/SMTPChannel.cpp - ${LIBRARY_DIR}/Net/src/SMTPClientSession.cpp - ${LIBRARY_DIR}/Net/src/Socket.cpp - ${LIBRARY_DIR}/Net/src/SocketAddress.cpp - ${LIBRARY_DIR}/Net/src/SocketAddressImpl.cpp - ${LIBRARY_DIR}/Net/src/SocketImpl.cpp - ${LIBRARY_DIR}/Net/src/SocketNotification.cpp - ${LIBRARY_DIR}/Net/src/SocketNotifier.cpp - ${LIBRARY_DIR}/Net/src/SocketReactor.cpp - ${LIBRARY_DIR}/Net/src/SocketStream.cpp - ${LIBRARY_DIR}/Net/src/StreamSocket.cpp - ${LIBRARY_DIR}/Net/src/StreamSocketImpl.cpp - ${LIBRARY_DIR}/Net/src/StringPartSource.cpp - ${LIBRARY_DIR}/Net/src/TCPServer.cpp - ${LIBRARY_DIR}/Net/src/TCPServerConnection.cpp - ${LIBRARY_DIR}/Net/src/TCPServerConnectionFactory.cpp - ${LIBRARY_DIR}/Net/src/TCPServerDispatcher.cpp - ${LIBRARY_DIR}/Net/src/TCPServerParams.cpp - ${LIBRARY_DIR}/Net/src/WebSocket.cpp - ${LIBRARY_DIR}/Net/src/WebSocketImpl.cpp + "${LIBRARY_DIR}/Net/src/AbstractHTTPRequestHandler.cpp" + "${LIBRARY_DIR}/Net/src/DatagramSocket.cpp" + "${LIBRARY_DIR}/Net/src/DatagramSocketImpl.cpp" + "${LIBRARY_DIR}/Net/src/DialogSocket.cpp" + "${LIBRARY_DIR}/Net/src/DNS.cpp" + "${LIBRARY_DIR}/Net/src/FilePartSource.cpp" + "${LIBRARY_DIR}/Net/src/FTPClientSession.cpp" + "${LIBRARY_DIR}/Net/src/FTPStreamFactory.cpp" + "${LIBRARY_DIR}/Net/src/HostEntry.cpp" + "${LIBRARY_DIR}/Net/src/HTMLForm.cpp" + "${LIBRARY_DIR}/Net/src/HTTPAuthenticationParams.cpp" + "${LIBRARY_DIR}/Net/src/HTTPBasicCredentials.cpp" + "${LIBRARY_DIR}/Net/src/HTTPBufferAllocator.cpp" + "${LIBRARY_DIR}/Net/src/HTTPChunkedStream.cpp" + "${LIBRARY_DIR}/Net/src/HTTPClientSession.cpp" + "${LIBRARY_DIR}/Net/src/HTTPCookie.cpp" + "${LIBRARY_DIR}/Net/src/HTTPCredentials.cpp" + "${LIBRARY_DIR}/Net/src/HTTPDigestCredentials.cpp" + "${LIBRARY_DIR}/Net/src/HTTPFixedLengthStream.cpp" + "${LIBRARY_DIR}/Net/src/HTTPHeaderStream.cpp" + "${LIBRARY_DIR}/Net/src/HTTPIOStream.cpp" + "${LIBRARY_DIR}/Net/src/HTTPMessage.cpp" + "${LIBRARY_DIR}/Net/src/HTTPRequest.cpp" + "${LIBRARY_DIR}/Net/src/HTTPRequestHandler.cpp" + "${LIBRARY_DIR}/Net/src/HTTPRequestHandlerFactory.cpp" + "${LIBRARY_DIR}/Net/src/HTTPResponse.cpp" + "${LIBRARY_DIR}/Net/src/HTTPServer.cpp" + "${LIBRARY_DIR}/Net/src/HTTPServerConnection.cpp" + "${LIBRARY_DIR}/Net/src/HTTPServerConnectionFactory.cpp" + "${LIBRARY_DIR}/Net/src/HTTPServerParams.cpp" + "${LIBRARY_DIR}/Net/src/HTTPServerRequest.cpp" + "${LIBRARY_DIR}/Net/src/HTTPServerRequestImpl.cpp" + "${LIBRARY_DIR}/Net/src/HTTPServerResponse.cpp" + "${LIBRARY_DIR}/Net/src/HTTPServerResponseImpl.cpp" + "${LIBRARY_DIR}/Net/src/HTTPServerSession.cpp" + "${LIBRARY_DIR}/Net/src/HTTPSession.cpp" + "${LIBRARY_DIR}/Net/src/HTTPSessionFactory.cpp" + "${LIBRARY_DIR}/Net/src/HTTPSessionInstantiator.cpp" + "${LIBRARY_DIR}/Net/src/HTTPStream.cpp" + "${LIBRARY_DIR}/Net/src/HTTPStreamFactory.cpp" + "${LIBRARY_DIR}/Net/src/ICMPClient.cpp" + 
"${LIBRARY_DIR}/Net/src/ICMPEventArgs.cpp" + "${LIBRARY_DIR}/Net/src/ICMPPacket.cpp" + "${LIBRARY_DIR}/Net/src/ICMPPacketImpl.cpp" + "${LIBRARY_DIR}/Net/src/ICMPSocket.cpp" + "${LIBRARY_DIR}/Net/src/ICMPSocketImpl.cpp" + "${LIBRARY_DIR}/Net/src/ICMPv4PacketImpl.cpp" + "${LIBRARY_DIR}/Net/src/IPAddress.cpp" + "${LIBRARY_DIR}/Net/src/IPAddressImpl.cpp" + "${LIBRARY_DIR}/Net/src/MailMessage.cpp" + "${LIBRARY_DIR}/Net/src/MailRecipient.cpp" + "${LIBRARY_DIR}/Net/src/MailStream.cpp" + "${LIBRARY_DIR}/Net/src/MediaType.cpp" + "${LIBRARY_DIR}/Net/src/MessageHeader.cpp" + "${LIBRARY_DIR}/Net/src/MulticastSocket.cpp" + "${LIBRARY_DIR}/Net/src/MultipartReader.cpp" + "${LIBRARY_DIR}/Net/src/MultipartWriter.cpp" + "${LIBRARY_DIR}/Net/src/NameValueCollection.cpp" + "${LIBRARY_DIR}/Net/src/Net.cpp" + "${LIBRARY_DIR}/Net/src/NetException.cpp" + "${LIBRARY_DIR}/Net/src/NetworkInterface.cpp" + "${LIBRARY_DIR}/Net/src/NTPClient.cpp" + "${LIBRARY_DIR}/Net/src/NTPEventArgs.cpp" + "${LIBRARY_DIR}/Net/src/NTPPacket.cpp" + "${LIBRARY_DIR}/Net/src/NullPartHandler.cpp" + "${LIBRARY_DIR}/Net/src/OAuth10Credentials.cpp" + "${LIBRARY_DIR}/Net/src/OAuth20Credentials.cpp" + "${LIBRARY_DIR}/Net/src/PartHandler.cpp" + "${LIBRARY_DIR}/Net/src/PartSource.cpp" + "${LIBRARY_DIR}/Net/src/PartStore.cpp" + "${LIBRARY_DIR}/Net/src/PollSet.cpp" + "${LIBRARY_DIR}/Net/src/POP3ClientSession.cpp" + "${LIBRARY_DIR}/Net/src/QuotedPrintableDecoder.cpp" + "${LIBRARY_DIR}/Net/src/QuotedPrintableEncoder.cpp" + "${LIBRARY_DIR}/Net/src/RawSocket.cpp" + "${LIBRARY_DIR}/Net/src/RawSocketImpl.cpp" + "${LIBRARY_DIR}/Net/src/RemoteSyslogChannel.cpp" + "${LIBRARY_DIR}/Net/src/RemoteSyslogListener.cpp" + "${LIBRARY_DIR}/Net/src/ServerSocket.cpp" + "${LIBRARY_DIR}/Net/src/ServerSocketImpl.cpp" + "${LIBRARY_DIR}/Net/src/SMTPChannel.cpp" + "${LIBRARY_DIR}/Net/src/SMTPClientSession.cpp" + "${LIBRARY_DIR}/Net/src/Socket.cpp" + "${LIBRARY_DIR}/Net/src/SocketAddress.cpp" + "${LIBRARY_DIR}/Net/src/SocketAddressImpl.cpp" + "${LIBRARY_DIR}/Net/src/SocketImpl.cpp" + "${LIBRARY_DIR}/Net/src/SocketNotification.cpp" + "${LIBRARY_DIR}/Net/src/SocketNotifier.cpp" + "${LIBRARY_DIR}/Net/src/SocketReactor.cpp" + "${LIBRARY_DIR}/Net/src/SocketStream.cpp" + "${LIBRARY_DIR}/Net/src/StreamSocket.cpp" + "${LIBRARY_DIR}/Net/src/StreamSocketImpl.cpp" + "${LIBRARY_DIR}/Net/src/StringPartSource.cpp" + "${LIBRARY_DIR}/Net/src/TCPServer.cpp" + "${LIBRARY_DIR}/Net/src/TCPServerConnection.cpp" + "${LIBRARY_DIR}/Net/src/TCPServerConnectionFactory.cpp" + "${LIBRARY_DIR}/Net/src/TCPServerDispatcher.cpp" + "${LIBRARY_DIR}/Net/src/TCPServerParams.cpp" + "${LIBRARY_DIR}/Net/src/WebSocket.cpp" + "${LIBRARY_DIR}/Net/src/WebSocketImpl.cpp" ) add_library (_poco_net ${SRCS}) @@ -125,7 +125,7 @@ if (USE_INTERNAL_POCO_LIBRARY) -Wno-deprecated -Wno-extra-semi ) - target_include_directories (_poco_net SYSTEM PUBLIC ${LIBRARY_DIR}/Net/include) + target_include_directories (_poco_net SYSTEM PUBLIC "${LIBRARY_DIR}/Net/include") target_link_libraries (_poco_net PUBLIC Poco::Foundation) else () add_library (Poco::Net UNKNOWN IMPORTED GLOBAL) diff --git a/contrib/poco-cmake/Net/SSL/CMakeLists.txt b/contrib/poco-cmake/Net/SSL/CMakeLists.txt index 7cc71f441c7..4b3adacfb8f 100644 --- a/contrib/poco-cmake/Net/SSL/CMakeLists.txt +++ b/contrib/poco-cmake/Net/SSL/CMakeLists.txt @@ -1,39 +1,39 @@ if (ENABLE_SSL) if (USE_INTERNAL_POCO_LIBRARY) set (SRCS - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/AcceptCertificateHandler.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/CertificateHandlerFactory.cpp - 
${LIBRARY_DIR}/NetSSL_OpenSSL/src/CertificateHandlerFactoryMgr.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/ConsoleCertificateHandler.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/Context.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/HTTPSClientSession.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/HTTPSSessionInstantiator.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/HTTPSStreamFactory.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/InvalidCertificateHandler.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/KeyConsoleHandler.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/KeyFileHandler.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/PrivateKeyFactory.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/PrivateKeyFactoryMgr.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/PrivateKeyPassphraseHandler.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/RejectCertificateHandler.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureServerSocket.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureServerSocketImpl.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureSMTPClientSession.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureSocketImpl.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureStreamSocket.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureStreamSocketImpl.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/Session.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SSLException.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SSLManager.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/Utility.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/VerificationErrorArgs.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/X509Certificate.cpp + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/AcceptCertificateHandler.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/CertificateHandlerFactory.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/CertificateHandlerFactoryMgr.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/ConsoleCertificateHandler.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/Context.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/HTTPSClientSession.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/HTTPSSessionInstantiator.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/HTTPSStreamFactory.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/InvalidCertificateHandler.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/KeyConsoleHandler.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/KeyFileHandler.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/PrivateKeyFactory.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/PrivateKeyFactoryMgr.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/PrivateKeyPassphraseHandler.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/RejectCertificateHandler.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureServerSocket.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureServerSocketImpl.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureSMTPClientSession.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureSocketImpl.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureStreamSocket.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureStreamSocketImpl.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/Session.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/SSLException.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/SSLManager.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/Utility.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/VerificationErrorArgs.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/X509Certificate.cpp" ) add_library (_poco_net_ssl ${SRCS}) add_library (Poco::Net::SSL ALIAS _poco_net_ssl) - target_include_directories (_poco_net_ssl SYSTEM PUBLIC ${LIBRARY_DIR}/NetSSL_OpenSSL/include) + target_include_directories (_poco_net_ssl SYSTEM PUBLIC "${LIBRARY_DIR}/NetSSL_OpenSSL/include") target_link_libraries (_poco_net_ssl PUBLIC Poco::Crypto Poco::Net Poco::Util) else () add_library (Poco::Net::SSL UNKNOWN 
IMPORTED GLOBAL) diff --git a/contrib/poco-cmake/Redis/CMakeLists.txt b/contrib/poco-cmake/Redis/CMakeLists.txt index 43d0009101c..b5892addd85 100644 --- a/contrib/poco-cmake/Redis/CMakeLists.txt +++ b/contrib/poco-cmake/Redis/CMakeLists.txt @@ -1,14 +1,14 @@ if (USE_INTERNAL_POCO_LIBRARY) set (SRCS - ${LIBRARY_DIR}/Redis/src/Array.cpp - ${LIBRARY_DIR}/Redis/src/AsyncReader.cpp - ${LIBRARY_DIR}/Redis/src/Client.cpp - ${LIBRARY_DIR}/Redis/src/Command.cpp - ${LIBRARY_DIR}/Redis/src/Error.cpp - ${LIBRARY_DIR}/Redis/src/Exception.cpp - ${LIBRARY_DIR}/Redis/src/RedisEventArgs.cpp - ${LIBRARY_DIR}/Redis/src/RedisStream.cpp - ${LIBRARY_DIR}/Redis/src/Type.cpp + "${LIBRARY_DIR}/Redis/src/Array.cpp" + "${LIBRARY_DIR}/Redis/src/AsyncReader.cpp" + "${LIBRARY_DIR}/Redis/src/Client.cpp" + "${LIBRARY_DIR}/Redis/src/Command.cpp" + "${LIBRARY_DIR}/Redis/src/Error.cpp" + "${LIBRARY_DIR}/Redis/src/Exception.cpp" + "${LIBRARY_DIR}/Redis/src/RedisEventArgs.cpp" + "${LIBRARY_DIR}/Redis/src/RedisStream.cpp" + "${LIBRARY_DIR}/Redis/src/Type.cpp" ) add_library (_poco_redis ${SRCS}) @@ -18,7 +18,7 @@ if (USE_INTERNAL_POCO_LIBRARY) target_compile_options (_poco_redis PRIVATE -Wno-deprecated-copy) endif () target_compile_options (_poco_redis PRIVATE -Wno-shadow) - target_include_directories (_poco_redis SYSTEM PUBLIC ${LIBRARY_DIR}/Redis/include) + target_include_directories (_poco_redis SYSTEM PUBLIC "${LIBRARY_DIR}/Redis/include") target_link_libraries (_poco_redis PUBLIC Poco::Net) else () add_library (Poco::Redis UNKNOWN IMPORTED GLOBAL) diff --git a/contrib/poco-cmake/Util/CMakeLists.txt b/contrib/poco-cmake/Util/CMakeLists.txt index f5af3a5793c..e233e65cfea 100644 --- a/contrib/poco-cmake/Util/CMakeLists.txt +++ b/contrib/poco-cmake/Util/CMakeLists.txt @@ -1,38 +1,38 @@ if (USE_INTERNAL_POCO_LIBRARY) set (SRCS - ${LIBRARY_DIR}/Util/src/AbstractConfiguration.cpp - ${LIBRARY_DIR}/Util/src/Application.cpp - ${LIBRARY_DIR}/Util/src/ConfigurationMapper.cpp - ${LIBRARY_DIR}/Util/src/ConfigurationView.cpp - ${LIBRARY_DIR}/Util/src/FilesystemConfiguration.cpp - ${LIBRARY_DIR}/Util/src/HelpFormatter.cpp - ${LIBRARY_DIR}/Util/src/IniFileConfiguration.cpp - ${LIBRARY_DIR}/Util/src/IntValidator.cpp - ${LIBRARY_DIR}/Util/src/JSONConfiguration.cpp - ${LIBRARY_DIR}/Util/src/LayeredConfiguration.cpp - ${LIBRARY_DIR}/Util/src/LoggingConfigurator.cpp - ${LIBRARY_DIR}/Util/src/LoggingSubsystem.cpp - ${LIBRARY_DIR}/Util/src/MapConfiguration.cpp - ${LIBRARY_DIR}/Util/src/Option.cpp - ${LIBRARY_DIR}/Util/src/OptionCallback.cpp - ${LIBRARY_DIR}/Util/src/OptionException.cpp - ${LIBRARY_DIR}/Util/src/OptionProcessor.cpp - ${LIBRARY_DIR}/Util/src/OptionSet.cpp - ${LIBRARY_DIR}/Util/src/PropertyFileConfiguration.cpp - ${LIBRARY_DIR}/Util/src/RegExpValidator.cpp - ${LIBRARY_DIR}/Util/src/ServerApplication.cpp - ${LIBRARY_DIR}/Util/src/Subsystem.cpp - ${LIBRARY_DIR}/Util/src/SystemConfiguration.cpp - ${LIBRARY_DIR}/Util/src/Timer.cpp - ${LIBRARY_DIR}/Util/src/TimerTask.cpp - ${LIBRARY_DIR}/Util/src/Validator.cpp - ${LIBRARY_DIR}/Util/src/XMLConfiguration.cpp + "${LIBRARY_DIR}/Util/src/AbstractConfiguration.cpp" + "${LIBRARY_DIR}/Util/src/Application.cpp" + "${LIBRARY_DIR}/Util/src/ConfigurationMapper.cpp" + "${LIBRARY_DIR}/Util/src/ConfigurationView.cpp" + "${LIBRARY_DIR}/Util/src/FilesystemConfiguration.cpp" + "${LIBRARY_DIR}/Util/src/HelpFormatter.cpp" + "${LIBRARY_DIR}/Util/src/IniFileConfiguration.cpp" + "${LIBRARY_DIR}/Util/src/IntValidator.cpp" + "${LIBRARY_DIR}/Util/src/JSONConfiguration.cpp" + 
"${LIBRARY_DIR}/Util/src/LayeredConfiguration.cpp" + "${LIBRARY_DIR}/Util/src/LoggingConfigurator.cpp" + "${LIBRARY_DIR}/Util/src/LoggingSubsystem.cpp" + "${LIBRARY_DIR}/Util/src/MapConfiguration.cpp" + "${LIBRARY_DIR}/Util/src/Option.cpp" + "${LIBRARY_DIR}/Util/src/OptionCallback.cpp" + "${LIBRARY_DIR}/Util/src/OptionException.cpp" + "${LIBRARY_DIR}/Util/src/OptionProcessor.cpp" + "${LIBRARY_DIR}/Util/src/OptionSet.cpp" + "${LIBRARY_DIR}/Util/src/PropertyFileConfiguration.cpp" + "${LIBRARY_DIR}/Util/src/RegExpValidator.cpp" + "${LIBRARY_DIR}/Util/src/ServerApplication.cpp" + "${LIBRARY_DIR}/Util/src/Subsystem.cpp" + "${LIBRARY_DIR}/Util/src/SystemConfiguration.cpp" + "${LIBRARY_DIR}/Util/src/Timer.cpp" + "${LIBRARY_DIR}/Util/src/TimerTask.cpp" + "${LIBRARY_DIR}/Util/src/Validator.cpp" + "${LIBRARY_DIR}/Util/src/XMLConfiguration.cpp" ) add_library (_poco_util ${SRCS}) add_library (Poco::Util ALIAS _poco_util) - target_include_directories (_poco_util SYSTEM PUBLIC ${LIBRARY_DIR}/Util/include) + target_include_directories (_poco_util SYSTEM PUBLIC "${LIBRARY_DIR}/Util/include") target_link_libraries (_poco_util PUBLIC Poco::JSON Poco::XML) else () add_library (Poco::Util UNKNOWN IMPORTED GLOBAL) diff --git a/contrib/poco-cmake/XML/CMakeLists.txt b/contrib/poco-cmake/XML/CMakeLists.txt index 448b7e22c7c..af801a65f03 100644 --- a/contrib/poco-cmake/XML/CMakeLists.txt +++ b/contrib/poco-cmake/XML/CMakeLists.txt @@ -2,101 +2,101 @@ if (USE_INTERNAL_POCO_LIBRARY) # Poco::XML (expat) set (SRCS_EXPAT - ${LIBRARY_DIR}/XML/src/xmlrole.c - ${LIBRARY_DIR}/XML/src/xmltok_impl.c - ${LIBRARY_DIR}/XML/src/xmltok_ns.c - ${LIBRARY_DIR}/XML/src/xmltok.c + "${LIBRARY_DIR}/XML/src/xmlrole.c" + "${LIBRARY_DIR}/XML/src/xmltok_impl.c" + "${LIBRARY_DIR}/XML/src/xmltok_ns.c" + "${LIBRARY_DIR}/XML/src/xmltok.c" ) add_library (_poco_xml_expat ${SRCS_EXPAT}) add_library (Poco::XML::Expat ALIAS _poco_xml_expat) - target_include_directories (_poco_xml_expat PUBLIC ${LIBRARY_DIR}/XML/include) + target_include_directories (_poco_xml_expat PUBLIC "${LIBRARY_DIR}/XML/include") # Poco::XML set (SRCS - ${LIBRARY_DIR}/XML/src/AbstractContainerNode.cpp - ${LIBRARY_DIR}/XML/src/AbstractNode.cpp - ${LIBRARY_DIR}/XML/src/Attr.cpp - ${LIBRARY_DIR}/XML/src/Attributes.cpp - ${LIBRARY_DIR}/XML/src/AttributesImpl.cpp - ${LIBRARY_DIR}/XML/src/AttrMap.cpp - ${LIBRARY_DIR}/XML/src/CDATASection.cpp - ${LIBRARY_DIR}/XML/src/CharacterData.cpp - ${LIBRARY_DIR}/XML/src/ChildNodesList.cpp - ${LIBRARY_DIR}/XML/src/Comment.cpp - ${LIBRARY_DIR}/XML/src/ContentHandler.cpp - ${LIBRARY_DIR}/XML/src/DeclHandler.cpp - ${LIBRARY_DIR}/XML/src/DefaultHandler.cpp - ${LIBRARY_DIR}/XML/src/Document.cpp - ${LIBRARY_DIR}/XML/src/DocumentEvent.cpp - ${LIBRARY_DIR}/XML/src/DocumentFragment.cpp - ${LIBRARY_DIR}/XML/src/DocumentType.cpp - ${LIBRARY_DIR}/XML/src/DOMBuilder.cpp - ${LIBRARY_DIR}/XML/src/DOMException.cpp - ${LIBRARY_DIR}/XML/src/DOMImplementation.cpp - ${LIBRARY_DIR}/XML/src/DOMObject.cpp - ${LIBRARY_DIR}/XML/src/DOMParser.cpp - ${LIBRARY_DIR}/XML/src/DOMSerializer.cpp - ${LIBRARY_DIR}/XML/src/DOMWriter.cpp - ${LIBRARY_DIR}/XML/src/DTDHandler.cpp - ${LIBRARY_DIR}/XML/src/DTDMap.cpp - ${LIBRARY_DIR}/XML/src/Element.cpp - ${LIBRARY_DIR}/XML/src/ElementsByTagNameList.cpp - ${LIBRARY_DIR}/XML/src/Entity.cpp - ${LIBRARY_DIR}/XML/src/EntityReference.cpp - ${LIBRARY_DIR}/XML/src/EntityResolver.cpp - ${LIBRARY_DIR}/XML/src/EntityResolverImpl.cpp - ${LIBRARY_DIR}/XML/src/ErrorHandler.cpp - ${LIBRARY_DIR}/XML/src/Event.cpp - 
${LIBRARY_DIR}/XML/src/EventDispatcher.cpp - ${LIBRARY_DIR}/XML/src/EventException.cpp - ${LIBRARY_DIR}/XML/src/EventListener.cpp - ${LIBRARY_DIR}/XML/src/EventTarget.cpp - ${LIBRARY_DIR}/XML/src/InputSource.cpp - ${LIBRARY_DIR}/XML/src/LexicalHandler.cpp - ${LIBRARY_DIR}/XML/src/Locator.cpp - ${LIBRARY_DIR}/XML/src/LocatorImpl.cpp - ${LIBRARY_DIR}/XML/src/MutationEvent.cpp - ${LIBRARY_DIR}/XML/src/Name.cpp - ${LIBRARY_DIR}/XML/src/NamedNodeMap.cpp - ${LIBRARY_DIR}/XML/src/NamePool.cpp - ${LIBRARY_DIR}/XML/src/NamespaceStrategy.cpp - ${LIBRARY_DIR}/XML/src/NamespaceSupport.cpp - ${LIBRARY_DIR}/XML/src/Node.cpp - ${LIBRARY_DIR}/XML/src/NodeAppender.cpp - ${LIBRARY_DIR}/XML/src/NodeFilter.cpp - ${LIBRARY_DIR}/XML/src/NodeIterator.cpp - ${LIBRARY_DIR}/XML/src/NodeList.cpp - ${LIBRARY_DIR}/XML/src/Notation.cpp - ${LIBRARY_DIR}/XML/src/ParserEngine.cpp - ${LIBRARY_DIR}/XML/src/ProcessingInstruction.cpp - ${LIBRARY_DIR}/XML/src/QName.cpp - ${LIBRARY_DIR}/XML/src/SAXException.cpp - ${LIBRARY_DIR}/XML/src/SAXParser.cpp - ${LIBRARY_DIR}/XML/src/Text.cpp - ${LIBRARY_DIR}/XML/src/TreeWalker.cpp - ${LIBRARY_DIR}/XML/src/ValueTraits.cpp - ${LIBRARY_DIR}/XML/src/WhitespaceFilter.cpp - ${LIBRARY_DIR}/XML/src/XMLException.cpp - ${LIBRARY_DIR}/XML/src/XMLFilter.cpp - ${LIBRARY_DIR}/XML/src/XMLFilterImpl.cpp - ${LIBRARY_DIR}/XML/src/XMLReader.cpp - ${LIBRARY_DIR}/XML/src/XMLStreamParser.cpp - ${LIBRARY_DIR}/XML/src/XMLStreamParserException.cpp - ${LIBRARY_DIR}/XML/src/XMLString.cpp - ${LIBRARY_DIR}/XML/src/XMLWriter.cpp + "${LIBRARY_DIR}/XML/src/AbstractContainerNode.cpp" + "${LIBRARY_DIR}/XML/src/AbstractNode.cpp" + "${LIBRARY_DIR}/XML/src/Attr.cpp" + "${LIBRARY_DIR}/XML/src/Attributes.cpp" + "${LIBRARY_DIR}/XML/src/AttributesImpl.cpp" + "${LIBRARY_DIR}/XML/src/AttrMap.cpp" + "${LIBRARY_DIR}/XML/src/CDATASection.cpp" + "${LIBRARY_DIR}/XML/src/CharacterData.cpp" + "${LIBRARY_DIR}/XML/src/ChildNodesList.cpp" + "${LIBRARY_DIR}/XML/src/Comment.cpp" + "${LIBRARY_DIR}/XML/src/ContentHandler.cpp" + "${LIBRARY_DIR}/XML/src/DeclHandler.cpp" + "${LIBRARY_DIR}/XML/src/DefaultHandler.cpp" + "${LIBRARY_DIR}/XML/src/Document.cpp" + "${LIBRARY_DIR}/XML/src/DocumentEvent.cpp" + "${LIBRARY_DIR}/XML/src/DocumentFragment.cpp" + "${LIBRARY_DIR}/XML/src/DocumentType.cpp" + "${LIBRARY_DIR}/XML/src/DOMBuilder.cpp" + "${LIBRARY_DIR}/XML/src/DOMException.cpp" + "${LIBRARY_DIR}/XML/src/DOMImplementation.cpp" + "${LIBRARY_DIR}/XML/src/DOMObject.cpp" + "${LIBRARY_DIR}/XML/src/DOMParser.cpp" + "${LIBRARY_DIR}/XML/src/DOMSerializer.cpp" + "${LIBRARY_DIR}/XML/src/DOMWriter.cpp" + "${LIBRARY_DIR}/XML/src/DTDHandler.cpp" + "${LIBRARY_DIR}/XML/src/DTDMap.cpp" + "${LIBRARY_DIR}/XML/src/Element.cpp" + "${LIBRARY_DIR}/XML/src/ElementsByTagNameList.cpp" + "${LIBRARY_DIR}/XML/src/Entity.cpp" + "${LIBRARY_DIR}/XML/src/EntityReference.cpp" + "${LIBRARY_DIR}/XML/src/EntityResolver.cpp" + "${LIBRARY_DIR}/XML/src/EntityResolverImpl.cpp" + "${LIBRARY_DIR}/XML/src/ErrorHandler.cpp" + "${LIBRARY_DIR}/XML/src/Event.cpp" + "${LIBRARY_DIR}/XML/src/EventDispatcher.cpp" + "${LIBRARY_DIR}/XML/src/EventException.cpp" + "${LIBRARY_DIR}/XML/src/EventListener.cpp" + "${LIBRARY_DIR}/XML/src/EventTarget.cpp" + "${LIBRARY_DIR}/XML/src/InputSource.cpp" + "${LIBRARY_DIR}/XML/src/LexicalHandler.cpp" + "${LIBRARY_DIR}/XML/src/Locator.cpp" + "${LIBRARY_DIR}/XML/src/LocatorImpl.cpp" + "${LIBRARY_DIR}/XML/src/MutationEvent.cpp" + "${LIBRARY_DIR}/XML/src/Name.cpp" + "${LIBRARY_DIR}/XML/src/NamedNodeMap.cpp" + "${LIBRARY_DIR}/XML/src/NamePool.cpp" + 
"${LIBRARY_DIR}/XML/src/NamespaceStrategy.cpp" + "${LIBRARY_DIR}/XML/src/NamespaceSupport.cpp" + "${LIBRARY_DIR}/XML/src/Node.cpp" + "${LIBRARY_DIR}/XML/src/NodeAppender.cpp" + "${LIBRARY_DIR}/XML/src/NodeFilter.cpp" + "${LIBRARY_DIR}/XML/src/NodeIterator.cpp" + "${LIBRARY_DIR}/XML/src/NodeList.cpp" + "${LIBRARY_DIR}/XML/src/Notation.cpp" + "${LIBRARY_DIR}/XML/src/ParserEngine.cpp" + "${LIBRARY_DIR}/XML/src/ProcessingInstruction.cpp" + "${LIBRARY_DIR}/XML/src/QName.cpp" + "${LIBRARY_DIR}/XML/src/SAXException.cpp" + "${LIBRARY_DIR}/XML/src/SAXParser.cpp" + "${LIBRARY_DIR}/XML/src/Text.cpp" + "${LIBRARY_DIR}/XML/src/TreeWalker.cpp" + "${LIBRARY_DIR}/XML/src/ValueTraits.cpp" + "${LIBRARY_DIR}/XML/src/WhitespaceFilter.cpp" + "${LIBRARY_DIR}/XML/src/XMLException.cpp" + "${LIBRARY_DIR}/XML/src/XMLFilter.cpp" + "${LIBRARY_DIR}/XML/src/XMLFilterImpl.cpp" + "${LIBRARY_DIR}/XML/src/XMLReader.cpp" + "${LIBRARY_DIR}/XML/src/XMLStreamParser.cpp" + "${LIBRARY_DIR}/XML/src/XMLStreamParserException.cpp" + "${LIBRARY_DIR}/XML/src/XMLString.cpp" + "${LIBRARY_DIR}/XML/src/XMLWriter.cpp" # expat - ${LIBRARY_DIR}/XML/src/xmlparse.cpp + "${LIBRARY_DIR}/XML/src/xmlparse.cpp" ) add_library (_poco_xml ${SRCS}) add_library (Poco::XML ALIAS _poco_xml) target_compile_options (_poco_xml PRIVATE -Wno-old-style-cast) - target_include_directories (_poco_xml SYSTEM PUBLIC ${LIBRARY_DIR}/XML/include) + target_include_directories (_poco_xml SYSTEM PUBLIC "${LIBRARY_DIR}/XML/include") target_link_libraries (_poco_xml PUBLIC Poco::Foundation Poco::XML::Expat) else () add_library (Poco::XML UNKNOWN IMPORTED GLOBAL) diff --git a/contrib/protobuf-cmake/CMakeLists.txt b/contrib/protobuf-cmake/CMakeLists.txt index 1f8d9b02b3e..a4993030d04 100644 --- a/contrib/protobuf-cmake/CMakeLists.txt +++ b/contrib/protobuf-cmake/CMakeLists.txt @@ -14,4 +14,4 @@ add_subdirectory("${protobuf_SOURCE_DIR}/cmake" "${protobuf_BINARY_DIR}") # We don't want to stop compilation on warnings in protobuf's headers. 
# The following line overrides the value assigned by the command target_include_directories() in libprotobuf.cmake -set_property(TARGET libprotobuf PROPERTY INTERFACE_SYSTEM_INCLUDE_DIRECTORIES ${protobuf_SOURCE_DIR}/src) +set_property(TARGET libprotobuf PROPERTY INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${protobuf_SOURCE_DIR}/src") diff --git a/contrib/re2 b/contrib/re2 index 7cf8b88e8f7..13ebb377c6a 160000 --- a/contrib/re2 +++ b/contrib/re2 @@ -1 +1 @@ -Subproject commit 7cf8b88e8f70f97fd4926b56aa87e7f53b2717e0 +Subproject commit 13ebb377c6ad763ca61d12dd6f88b1126bd0b911 diff --git a/contrib/re2_st/re2_transform.cmake b/contrib/re2_st/re2_transform.cmake index 2d50d9e8c2a..56a96f45630 100644 --- a/contrib/re2_st/re2_transform.cmake +++ b/contrib/re2_st/re2_transform.cmake @@ -1,7 +1,7 @@ file (READ ${SOURCE_FILENAME} CONTENT) string (REGEX REPLACE "using re2::RE2;" "" CONTENT "${CONTENT}") string (REGEX REPLACE "using re2::LazyRE2;" "" CONTENT "${CONTENT}") -string (REGEX REPLACE "namespace re2" "namespace re2_st" CONTENT "${CONTENT}") +string (REGEX REPLACE "namespace re2 {" "namespace re2_st {" CONTENT "${CONTENT}") string (REGEX REPLACE "re2::" "re2_st::" CONTENT "${CONTENT}") string (REGEX REPLACE "\"re2/" "\"re2_st/" CONTENT "${CONTENT}") string (REGEX REPLACE "(.\\*?_H)" "\\1_ST" CONTENT "${CONTENT}") diff --git a/contrib/replxx-cmake/CMakeLists.txt b/contrib/replxx-cmake/CMakeLists.txt index df17e0ed646..07f24bae25d 100644 --- a/contrib/replxx-cmake/CMakeLists.txt +++ b/contrib/replxx-cmake/CMakeLists.txt @@ -62,7 +62,7 @@ if (NOT LIBRARY_REPLXX OR NOT INCLUDE_REPLXX OR NOT EXTERNAL_REPLXX_WORKS) ) add_library (replxx ${SRCS}) - target_include_directories(replxx SYSTEM PUBLIC ${LIBRARY_DIR}/include) + target_include_directories(replxx SYSTEM PUBLIC "${LIBRARY_DIR}/include") endif () if (COMPILER_CLANG) diff --git a/contrib/rocksdb b/contrib/rocksdb index 54a0decabbc..07c77549a20 160000 --- a/contrib/rocksdb +++ b/contrib/rocksdb @@ -1 +1 @@ -Subproject commit 54a0decabbcf4c0bb5cf7befa9c597f28289bff5 +Subproject commit 07c77549a20b63ff6981b400085eba36bb5c80c4 diff --git a/contrib/rocksdb-cmake/CMakeLists.txt b/contrib/rocksdb-cmake/CMakeLists.txt index 77a30776a4a..bccc9ed5294 100644 --- a/contrib/rocksdb-cmake/CMakeLists.txt +++ b/contrib/rocksdb-cmake/CMakeLists.txt @@ -2,15 +2,6 @@ set(ROCKSDB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/rocksdb") list(APPEND CMAKE_MODULE_PATH "${ROCKSDB_SOURCE_DIR}/cmake/modules/") -if (SANITIZE STREQUAL "undefined") - set(WITH_UBSAN ON) -elseif (SANITIZE STREQUAL "address") - set(WITH_ASAN ON) -elseif (SANITIZE STREQUAL "thread") - set(WITH_TSAN ON) -endif() - - set(PORTABLE ON) ## always disable jemalloc for rocksdb by default ## because it introduces non-standard jemalloc APIs @@ -40,7 +31,7 @@ endif() if(MSVC) option(WITH_XPRESS "build with windows built in compression" OFF) - include(${ROCKSDB_SOURCE_DIR}/thirdparty.inc) + include("${ROCKSDB_SOURCE_DIR}/thirdparty.inc") else() if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD" AND NOT CMAKE_SYSTEM_NAME MATCHES "kFreeBSD") # FreeBSD has jemalloc as default malloc @@ -71,55 +62,18 @@ else() if(WITH_ZSTD) add_definitions(-DZSTD) include_directories(${ZSTD_INCLUDE_DIR}) - include_directories(${ZSTD_INCLUDE_DIR}/common) - include_directories(${ZSTD_INCLUDE_DIR}/dictBuilder) - include_directories(${ZSTD_INCLUDE_DIR}/deprecated) + include_directories("${ZSTD_INCLUDE_DIR}/common") + include_directories("${ZSTD_INCLUDE_DIR}/dictBuilder") + include_directories("${ZSTD_INCLUDE_DIR}/deprecated") list(APPEND 
THIRDPARTY_LIBS zstd) endif() endif() -string(TIMESTAMP TS "%Y/%m/%d %H:%M:%S" UTC) -set(GIT_DATE_TIME "${TS}" CACHE STRING "the time we first built rocksdb") - -find_package(Git) - -if(GIT_FOUND AND EXISTS "${ROCKSDB_SOURCE_DIR}/.git") - if(WIN32) - execute_process(COMMAND $ENV{COMSPEC} /C ${GIT_EXECUTABLE} -C ${ROCKSDB_SOURCE_DIR} rev-parse HEAD OUTPUT_VARIABLE GIT_SHA) - else() - execute_process(COMMAND ${GIT_EXECUTABLE} -C ${ROCKSDB_SOURCE_DIR} rev-parse HEAD OUTPUT_VARIABLE GIT_SHA) - endif() -else() - set(GIT_SHA 0) -endif() - -string(REGEX REPLACE "[^0-9a-f]+" "" GIT_SHA "${GIT_SHA}") - -set(BUILD_VERSION_CC ${CMAKE_BINARY_DIR}/rocksdb_build_version.cc) -configure_file(${ROCKSDB_SOURCE_DIR}/util/build_version.cc.in ${BUILD_VERSION_CC} @ONLY) +set(BUILD_VERSION_CC rocksdb_build_version.cc) add_library(rocksdb_build_version OBJECT ${BUILD_VERSION_CC}) -target_include_directories(rocksdb_build_version PRIVATE - ${ROCKSDB_SOURCE_DIR}/util) -if(MSVC) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Zi /nologo /EHsc /GS /Gd /GR /GF /fp:precise /Zc:wchar_t /Zc:forScope /errorReport:queue") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FC /d2Zi+ /W4 /wd4127 /wd4800 /wd4996 /wd4351 /wd4100 /wd4204 /wd4324") -else() - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -W -Wextra -Wall") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wsign-compare -Wshadow -Wno-unused-parameter -Wno-unused-variable -Woverloaded-virtual -Wnon-virtual-dtor -Wno-missing-field-initializers -Wno-strict-aliasing") - if(MINGW) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-format -fno-asynchronous-unwind-tables") - add_definitions(-D_POSIX_C_SOURCE=1) - endif() - if(NOT CMAKE_BUILD_TYPE STREQUAL "Debug") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-omit-frame-pointer") - include(CheckCXXCompilerFlag) - CHECK_CXX_COMPILER_FLAG("-momit-leaf-frame-pointer" HAVE_OMIT_LEAF_FRAME_POINTER) - if(HAVE_OMIT_LEAF_FRAME_POINTER) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -momit-leaf-frame-pointer") - endif() - endif() -endif() + +target_include_directories(rocksdb_build_version PRIVATE "${ROCKSDB_SOURCE_DIR}/util") include(CheckCCompilerFlag) if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64") @@ -142,14 +96,14 @@ if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64") endif(HAS_ALTIVEC) endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64") -if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64") +if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64") CHECK_C_COMPILER_FLAG("-march=armv8-a+crc+crypto" HAS_ARMV8_CRC) if(HAS_ARMV8_CRC) message(STATUS " HAS_ARMV8_CRC yes") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function") endif(HAS_ARMV8_CRC) -endif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64") +endif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64") include(CheckCXXSourceCompiles) @@ -189,50 +143,7 @@ if(HAVE_THREAD_LOCAL) add_definitions(-DROCKSDB_SUPPORT_THREAD_LOCAL) endif() -option(FAIL_ON_WARNINGS "Treat compile warnings as errors" ON) -if(FAIL_ON_WARNINGS) - if(MSVC) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /WX") - else() # assume GCC - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror") - endif() -endif() - -option(WITH_ASAN "build with ASAN" OFF) -if(WITH_ASAN) - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=address") - if(WITH_JEMALLOC) - message(FATAL "ASAN 
does not work well with JeMalloc") - endif() -endif() - -option(WITH_TSAN "build with TSAN" OFF) -if(WITH_TSAN) - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=thread -pie") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=thread -fPIC") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=thread -fPIC") - if(WITH_JEMALLOC) - message(FATAL "TSAN does not work well with JeMalloc") - endif() -endif() - -option(WITH_UBSAN "build with UBSAN" OFF) -if(WITH_UBSAN) - add_definitions(-DROCKSDB_UBSAN_RUN) - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=undefined") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=undefined") - if(WITH_JEMALLOC) - message(FATAL "UBSAN does not work well with JeMalloc") - endif() -endif() - - -if(CMAKE_SYSTEM_NAME MATCHES "Cygwin") - add_definitions(-fno-builtin-memcmp -DCYGWIN) -elseif(CMAKE_SYSTEM_NAME MATCHES "Darwin") +if(CMAKE_SYSTEM_NAME MATCHES "Darwin") add_definitions(-DOS_MACOSX) if(CMAKE_SYSTEM_PROCESSOR MATCHES arm) add_definitions(-DIOS_CROSS_COMPILE -DROCKSDB_LITE) @@ -304,9 +215,9 @@ endif() include(CheckCXXSymbolExists) if(CMAKE_SYSTEM_NAME MATCHES "^FreeBSD") - check_cxx_symbol_exists(malloc_usable_size ${ROCKSDB_SOURCE_DIR}/malloc_np.h HAVE_MALLOC_USABLE_SIZE) + check_cxx_symbol_exists(malloc_usable_size "${ROCKSDB_SOURCE_DIR}/malloc_np.h" HAVE_MALLOC_USABLE_SIZE) else() - check_cxx_symbol_exists(malloc_usable_size ${ROCKSDB_SOURCE_DIR}/malloc.h HAVE_MALLOC_USABLE_SIZE) + check_cxx_symbol_exists(malloc_usable_size "${ROCKSDB_SOURCE_DIR}/malloc.h" HAVE_MALLOC_USABLE_SIZE) endif() if(HAVE_MALLOC_USABLE_SIZE) add_definitions(-DROCKSDB_MALLOC_USABLE_SIZE) @@ -323,347 +234,316 @@ if(HAVE_AUXV_GETAUXVAL) endif() include_directories(${ROCKSDB_SOURCE_DIR}) -include_directories(${ROCKSDB_SOURCE_DIR}/include) +include_directories("${ROCKSDB_SOURCE_DIR}/include") if(WITH_FOLLY_DISTRIBUTED_MUTEX) - include_directories(${ROCKSDB_SOURCE_DIR}/third-party/folly) + include_directories("${ROCKSDB_SOURCE_DIR}/third-party/folly") endif() find_package(Threads REQUIRED) # Main library source code set(SOURCES - ${ROCKSDB_SOURCE_DIR}/cache/cache.cc - ${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc - ${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc - ${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc - ${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc - ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc - ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_builder.cc - ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_cache.cc - ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_garbage.cc - ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_meta.cc - ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_reader.cc - ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc - ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc - ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc - ${ROCKSDB_SOURCE_DIR}/db/builder.cc - ${ROCKSDB_SOURCE_DIR}/db/c.cc - ${ROCKSDB_SOURCE_DIR}/db/column_family.cc - ${ROCKSDB_SOURCE_DIR}/db/compacted_db_impl.cc - ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction.cc - ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_iterator.cc - ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker.cc - ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_job.cc - ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_fifo.cc - ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_level.cc - ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_universal.cc - ${ROCKSDB_SOURCE_DIR}/db/compaction/sst_partitioner.cc - ${ROCKSDB_SOURCE_DIR}/db/convenience.cc 
- ${ROCKSDB_SOURCE_DIR}/db/db_filesnapshot.cc - ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl.cc - ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_write.cc - ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_compaction_flush.cc - ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_files.cc - ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_open.cc - ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_debug.cc - ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_experimental.cc - ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_readonly.cc - ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_secondary.cc - ${ROCKSDB_SOURCE_DIR}/db/db_info_dumper.cc - ${ROCKSDB_SOURCE_DIR}/db/db_iter.cc - ${ROCKSDB_SOURCE_DIR}/db/dbformat.cc - ${ROCKSDB_SOURCE_DIR}/db/error_handler.cc - ${ROCKSDB_SOURCE_DIR}/db/event_helpers.cc - ${ROCKSDB_SOURCE_DIR}/db/experimental.cc - ${ROCKSDB_SOURCE_DIR}/db/external_sst_file_ingestion_job.cc - ${ROCKSDB_SOURCE_DIR}/db/file_indexer.cc - ${ROCKSDB_SOURCE_DIR}/db/flush_job.cc - ${ROCKSDB_SOURCE_DIR}/db/flush_scheduler.cc - ${ROCKSDB_SOURCE_DIR}/db/forward_iterator.cc - ${ROCKSDB_SOURCE_DIR}/db/import_column_family_job.cc - ${ROCKSDB_SOURCE_DIR}/db/internal_stats.cc - ${ROCKSDB_SOURCE_DIR}/db/logs_with_prep_tracker.cc - ${ROCKSDB_SOURCE_DIR}/db/log_reader.cc - ${ROCKSDB_SOURCE_DIR}/db/log_writer.cc - ${ROCKSDB_SOURCE_DIR}/db/malloc_stats.cc - ${ROCKSDB_SOURCE_DIR}/db/memtable.cc - ${ROCKSDB_SOURCE_DIR}/db/memtable_list.cc - ${ROCKSDB_SOURCE_DIR}/db/merge_helper.cc - ${ROCKSDB_SOURCE_DIR}/db/merge_operator.cc - ${ROCKSDB_SOURCE_DIR}/db/output_validator.cc - ${ROCKSDB_SOURCE_DIR}/db/periodic_work_scheduler.cc - ${ROCKSDB_SOURCE_DIR}/db/range_del_aggregator.cc - ${ROCKSDB_SOURCE_DIR}/db/range_tombstone_fragmenter.cc - ${ROCKSDB_SOURCE_DIR}/db/repair.cc - ${ROCKSDB_SOURCE_DIR}/db/snapshot_impl.cc - ${ROCKSDB_SOURCE_DIR}/db/table_cache.cc - ${ROCKSDB_SOURCE_DIR}/db/table_properties_collector.cc - ${ROCKSDB_SOURCE_DIR}/db/transaction_log_impl.cc - ${ROCKSDB_SOURCE_DIR}/db/trim_history_scheduler.cc - ${ROCKSDB_SOURCE_DIR}/db/version_builder.cc - ${ROCKSDB_SOURCE_DIR}/db/version_edit.cc - ${ROCKSDB_SOURCE_DIR}/db/version_edit_handler.cc - ${ROCKSDB_SOURCE_DIR}/db/version_set.cc - ${ROCKSDB_SOURCE_DIR}/db/wal_edit.cc - ${ROCKSDB_SOURCE_DIR}/db/wal_manager.cc - ${ROCKSDB_SOURCE_DIR}/db/write_batch.cc - ${ROCKSDB_SOURCE_DIR}/db/write_batch_base.cc - ${ROCKSDB_SOURCE_DIR}/db/write_controller.cc - ${ROCKSDB_SOURCE_DIR}/db/write_thread.cc - ${ROCKSDB_SOURCE_DIR}/env/env.cc - ${ROCKSDB_SOURCE_DIR}/env/env_chroot.cc - ${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc - ${ROCKSDB_SOURCE_DIR}/env/env_hdfs.cc - ${ROCKSDB_SOURCE_DIR}/env/file_system.cc - ${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc - ${ROCKSDB_SOURCE_DIR}/env/mock_env.cc - ${ROCKSDB_SOURCE_DIR}/file/delete_scheduler.cc - ${ROCKSDB_SOURCE_DIR}/file/file_prefetch_buffer.cc - ${ROCKSDB_SOURCE_DIR}/file/file_util.cc - ${ROCKSDB_SOURCE_DIR}/file/filename.cc - ${ROCKSDB_SOURCE_DIR}/file/random_access_file_reader.cc - ${ROCKSDB_SOURCE_DIR}/file/read_write_util.cc - ${ROCKSDB_SOURCE_DIR}/file/readahead_raf.cc - ${ROCKSDB_SOURCE_DIR}/file/sequence_file_reader.cc - ${ROCKSDB_SOURCE_DIR}/file/sst_file_manager_impl.cc - ${ROCKSDB_SOURCE_DIR}/file/writable_file_writer.cc - ${ROCKSDB_SOURCE_DIR}/logging/auto_roll_logger.cc - ${ROCKSDB_SOURCE_DIR}/logging/event_logger.cc - ${ROCKSDB_SOURCE_DIR}/logging/log_buffer.cc - ${ROCKSDB_SOURCE_DIR}/memory/arena.cc - ${ROCKSDB_SOURCE_DIR}/memory/concurrent_arena.cc - ${ROCKSDB_SOURCE_DIR}/memory/jemalloc_nodump_allocator.cc - ${ROCKSDB_SOURCE_DIR}/memory/memkind_kmem_allocator.cc 
- ${ROCKSDB_SOURCE_DIR}/memtable/alloc_tracker.cc - ${ROCKSDB_SOURCE_DIR}/memtable/hash_linklist_rep.cc - ${ROCKSDB_SOURCE_DIR}/memtable/hash_skiplist_rep.cc - ${ROCKSDB_SOURCE_DIR}/memtable/skiplistrep.cc - ${ROCKSDB_SOURCE_DIR}/memtable/vectorrep.cc - ${ROCKSDB_SOURCE_DIR}/memtable/write_buffer_manager.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/histogram.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/histogram_windowing.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/in_memory_stats_history.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/instrumented_mutex.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/iostats_context.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/perf_context.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/perf_level.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/persistent_stats_history.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/statistics.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_impl.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_updater.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util_debug.cc - ${ROCKSDB_SOURCE_DIR}/options/cf_options.cc - ${ROCKSDB_SOURCE_DIR}/options/configurable.cc - ${ROCKSDB_SOURCE_DIR}/options/customizable.cc - ${ROCKSDB_SOURCE_DIR}/options/db_options.cc - ${ROCKSDB_SOURCE_DIR}/options/options.cc - ${ROCKSDB_SOURCE_DIR}/options/options_helper.cc - ${ROCKSDB_SOURCE_DIR}/options/options_parser.cc - ${ROCKSDB_SOURCE_DIR}/port/stack_trace.cc - ${ROCKSDB_SOURCE_DIR}/table/adaptive/adaptive_table_factory.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/binary_search_index_reader.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/block.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_filter_block.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_builder.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_factory.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_iterator.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_reader.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/block_builder.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefetcher.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefix_index.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_hash_index.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_footer.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/filter_block_reader_common.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/filter_policy.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/flush_block_policy.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/full_filter_block.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/hash_index_reader.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/index_builder.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/index_reader_common.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/parsed_full_filter_block.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_filter_block.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_iterator.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_reader.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/reader_common.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/uncompression_dict_reader.cc - ${ROCKSDB_SOURCE_DIR}/table/block_fetcher.cc - ${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_builder.cc - ${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_factory.cc - ${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_reader.cc - ${ROCKSDB_SOURCE_DIR}/table/format.cc - ${ROCKSDB_SOURCE_DIR}/table/get_context.cc - ${ROCKSDB_SOURCE_DIR}/table/iterator.cc - ${ROCKSDB_SOURCE_DIR}/table/merging_iterator.cc - 
${ROCKSDB_SOURCE_DIR}/table/meta_blocks.cc - ${ROCKSDB_SOURCE_DIR}/table/persistent_cache_helper.cc - ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_bloom.cc - ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_builder.cc - ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_factory.cc - ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_index.cc - ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_key_coding.cc - ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_reader.cc - ${ROCKSDB_SOURCE_DIR}/table/sst_file_dumper.cc - ${ROCKSDB_SOURCE_DIR}/table/sst_file_reader.cc - ${ROCKSDB_SOURCE_DIR}/table/sst_file_writer.cc - ${ROCKSDB_SOURCE_DIR}/table/table_factory.cc - ${ROCKSDB_SOURCE_DIR}/table/table_properties.cc - ${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc - ${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc - ${ROCKSDB_SOURCE_DIR}/test_util/sync_point_impl.cc - ${ROCKSDB_SOURCE_DIR}/test_util/testutil.cc - ${ROCKSDB_SOURCE_DIR}/test_util/transaction_test_util.cc - ${ROCKSDB_SOURCE_DIR}/tools/block_cache_analyzer/block_cache_trace_analyzer.cc - ${ROCKSDB_SOURCE_DIR}/tools/dump/db_dump_tool.cc - ${ROCKSDB_SOURCE_DIR}/tools/io_tracer_parser_tool.cc - ${ROCKSDB_SOURCE_DIR}/tools/ldb_cmd.cc - ${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc - ${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc - ${ROCKSDB_SOURCE_DIR}/tools/trace_analyzer_tool.cc - ${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc - ${ROCKSDB_SOURCE_DIR}/trace_replay/block_cache_tracer.cc - ${ROCKSDB_SOURCE_DIR}/trace_replay/io_tracer.cc - ${ROCKSDB_SOURCE_DIR}/util/coding.cc - ${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc - ${ROCKSDB_SOURCE_DIR}/util/comparator.cc - ${ROCKSDB_SOURCE_DIR}/util/compression_context_cache.cc - ${ROCKSDB_SOURCE_DIR}/util/concurrent_task_limiter_impl.cc - ${ROCKSDB_SOURCE_DIR}/util/crc32c.cc - ${ROCKSDB_SOURCE_DIR}/util/dynamic_bloom.cc - ${ROCKSDB_SOURCE_DIR}/util/hash.cc - ${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc - ${ROCKSDB_SOURCE_DIR}/util/random.cc - ${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc - ${ROCKSDB_SOURCE_DIR}/util/slice.cc - ${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc - ${ROCKSDB_SOURCE_DIR}/util/status.cc - ${ROCKSDB_SOURCE_DIR}/util/string_util.cc - ${ROCKSDB_SOURCE_DIR}/util/thread_local.cc - ${ROCKSDB_SOURCE_DIR}/util/threadpool_imp.cc - ${ROCKSDB_SOURCE_DIR}/util/xxhash.cc - ${ROCKSDB_SOURCE_DIR}/utilities/backupable/backupable_db.cc - ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_compaction_filter.cc - ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db.cc - ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl.cc - ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl_filesnapshot.cc - ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_dump_tool.cc - ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_file.cc - ${ROCKSDB_SOURCE_DIR}/utilities/cassandra/cassandra_compaction_filter.cc - ${ROCKSDB_SOURCE_DIR}/utilities/cassandra/format.cc - ${ROCKSDB_SOURCE_DIR}/utilities/cassandra/merge_operator.cc - ${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc - ${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc - ${ROCKSDB_SOURCE_DIR}/utilities/debug.cc - ${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc - ${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc - ${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_env.cc - ${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_fs.cc - ${ROCKSDB_SOURCE_DIR}/utilities/leveldb_options/leveldb_options.cc - ${ROCKSDB_SOURCE_DIR}/utilities/memory/memory_util.cc - ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/bytesxor.cc - 
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/max.cc - ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/put.cc - ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/sortlist.cc - ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend.cc - ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend2.cc - ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/uint64add.cc - ${ROCKSDB_SOURCE_DIR}/utilities/object_registry.cc - ${ROCKSDB_SOURCE_DIR}/utilities/option_change_migration/option_change_migration.cc - ${ROCKSDB_SOURCE_DIR}/utilities/options/options_util.cc - ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier.cc - ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_file.cc - ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_metadata.cc - ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/persistent_cache_tier.cc - ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/volatile_tier_impl.cc - ${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/cache_simulator.cc - ${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc - ${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc - ${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_manager.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_tracker.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_manager.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction_db_impl.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction_db.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/snapshot_checker.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_base.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_db_mutex_impl.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_util.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc - ${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc - ${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc - ${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index_internal.cc + "${ROCKSDB_SOURCE_DIR}/cache/cache.cc" + "${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc" + "${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc" + "${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc" + "${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc" + "${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc" + "${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_builder.cc" + "${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_cache.cc" + "${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_garbage.cc" + "${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_meta.cc" + "${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_reader.cc" + "${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc" + "${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc" + "${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc" + "${ROCKSDB_SOURCE_DIR}/db/builder.cc" + "${ROCKSDB_SOURCE_DIR}/db/c.cc" + "${ROCKSDB_SOURCE_DIR}/db/column_family.cc" + "${ROCKSDB_SOURCE_DIR}/db/compacted_db_impl.cc" + "${ROCKSDB_SOURCE_DIR}/db/compaction/compaction.cc" + 
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_iterator.cc" + "${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker.cc" + "${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_job.cc" + "${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_fifo.cc" + "${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_level.cc" + "${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_universal.cc" + "${ROCKSDB_SOURCE_DIR}/db/compaction/sst_partitioner.cc" + "${ROCKSDB_SOURCE_DIR}/db/convenience.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_filesnapshot.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_write.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_compaction_flush.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_files.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_open.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_debug.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_experimental.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_readonly.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_secondary.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_info_dumper.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_iter.cc" + "${ROCKSDB_SOURCE_DIR}/db/dbformat.cc" + "${ROCKSDB_SOURCE_DIR}/db/error_handler.cc" + "${ROCKSDB_SOURCE_DIR}/db/event_helpers.cc" + "${ROCKSDB_SOURCE_DIR}/db/experimental.cc" + "${ROCKSDB_SOURCE_DIR}/db/external_sst_file_ingestion_job.cc" + "${ROCKSDB_SOURCE_DIR}/db/file_indexer.cc" + "${ROCKSDB_SOURCE_DIR}/db/flush_job.cc" + "${ROCKSDB_SOURCE_DIR}/db/flush_scheduler.cc" + "${ROCKSDB_SOURCE_DIR}/db/forward_iterator.cc" + "${ROCKSDB_SOURCE_DIR}/db/import_column_family_job.cc" + "${ROCKSDB_SOURCE_DIR}/db/internal_stats.cc" + "${ROCKSDB_SOURCE_DIR}/db/logs_with_prep_tracker.cc" + "${ROCKSDB_SOURCE_DIR}/db/log_reader.cc" + "${ROCKSDB_SOURCE_DIR}/db/log_writer.cc" + "${ROCKSDB_SOURCE_DIR}/db/malloc_stats.cc" + "${ROCKSDB_SOURCE_DIR}/db/memtable.cc" + "${ROCKSDB_SOURCE_DIR}/db/memtable_list.cc" + "${ROCKSDB_SOURCE_DIR}/db/merge_helper.cc" + "${ROCKSDB_SOURCE_DIR}/db/merge_operator.cc" + "${ROCKSDB_SOURCE_DIR}/db/output_validator.cc" + "${ROCKSDB_SOURCE_DIR}/db/periodic_work_scheduler.cc" + "${ROCKSDB_SOURCE_DIR}/db/range_del_aggregator.cc" + "${ROCKSDB_SOURCE_DIR}/db/range_tombstone_fragmenter.cc" + "${ROCKSDB_SOURCE_DIR}/db/repair.cc" + "${ROCKSDB_SOURCE_DIR}/db/snapshot_impl.cc" + "${ROCKSDB_SOURCE_DIR}/db/table_cache.cc" + "${ROCKSDB_SOURCE_DIR}/db/table_properties_collector.cc" + "${ROCKSDB_SOURCE_DIR}/db/transaction_log_impl.cc" + "${ROCKSDB_SOURCE_DIR}/db/trim_history_scheduler.cc" + "${ROCKSDB_SOURCE_DIR}/db/version_builder.cc" + "${ROCKSDB_SOURCE_DIR}/db/version_edit.cc" + "${ROCKSDB_SOURCE_DIR}/db/version_edit_handler.cc" + "${ROCKSDB_SOURCE_DIR}/db/version_set.cc" + "${ROCKSDB_SOURCE_DIR}/db/wal_edit.cc" + "${ROCKSDB_SOURCE_DIR}/db/wal_manager.cc" + "${ROCKSDB_SOURCE_DIR}/db/write_batch.cc" + "${ROCKSDB_SOURCE_DIR}/db/write_batch_base.cc" + "${ROCKSDB_SOURCE_DIR}/db/write_controller.cc" + "${ROCKSDB_SOURCE_DIR}/db/write_thread.cc" + "${ROCKSDB_SOURCE_DIR}/env/env.cc" + "${ROCKSDB_SOURCE_DIR}/env/env_chroot.cc" + "${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc" + "${ROCKSDB_SOURCE_DIR}/env/env_hdfs.cc" + "${ROCKSDB_SOURCE_DIR}/env/file_system.cc" + "${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc" + "${ROCKSDB_SOURCE_DIR}/env/mock_env.cc" + "${ROCKSDB_SOURCE_DIR}/file/delete_scheduler.cc" + "${ROCKSDB_SOURCE_DIR}/file/file_prefetch_buffer.cc" + "${ROCKSDB_SOURCE_DIR}/file/file_util.cc" + "${ROCKSDB_SOURCE_DIR}/file/filename.cc" + 
"${ROCKSDB_SOURCE_DIR}/file/random_access_file_reader.cc" + "${ROCKSDB_SOURCE_DIR}/file/read_write_util.cc" + "${ROCKSDB_SOURCE_DIR}/file/readahead_raf.cc" + "${ROCKSDB_SOURCE_DIR}/file/sequence_file_reader.cc" + "${ROCKSDB_SOURCE_DIR}/file/sst_file_manager_impl.cc" + "${ROCKSDB_SOURCE_DIR}/file/writable_file_writer.cc" + "${ROCKSDB_SOURCE_DIR}/logging/auto_roll_logger.cc" + "${ROCKSDB_SOURCE_DIR}/logging/event_logger.cc" + "${ROCKSDB_SOURCE_DIR}/logging/log_buffer.cc" + "${ROCKSDB_SOURCE_DIR}/memory/arena.cc" + "${ROCKSDB_SOURCE_DIR}/memory/concurrent_arena.cc" + "${ROCKSDB_SOURCE_DIR}/memory/jemalloc_nodump_allocator.cc" + "${ROCKSDB_SOURCE_DIR}/memory/memkind_kmem_allocator.cc" + "${ROCKSDB_SOURCE_DIR}/memtable/alloc_tracker.cc" + "${ROCKSDB_SOURCE_DIR}/memtable/hash_linklist_rep.cc" + "${ROCKSDB_SOURCE_DIR}/memtable/hash_skiplist_rep.cc" + "${ROCKSDB_SOURCE_DIR}/memtable/skiplistrep.cc" + "${ROCKSDB_SOURCE_DIR}/memtable/vectorrep.cc" + "${ROCKSDB_SOURCE_DIR}/memtable/write_buffer_manager.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/histogram.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/histogram_windowing.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/in_memory_stats_history.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/instrumented_mutex.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/iostats_context.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/perf_context.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/perf_level.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/persistent_stats_history.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/statistics.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_impl.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_updater.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util_debug.cc" + "${ROCKSDB_SOURCE_DIR}/options/cf_options.cc" + "${ROCKSDB_SOURCE_DIR}/options/configurable.cc" + "${ROCKSDB_SOURCE_DIR}/options/customizable.cc" + "${ROCKSDB_SOURCE_DIR}/options/db_options.cc" + "${ROCKSDB_SOURCE_DIR}/options/options.cc" + "${ROCKSDB_SOURCE_DIR}/options/options_helper.cc" + "${ROCKSDB_SOURCE_DIR}/options/options_parser.cc" + "${ROCKSDB_SOURCE_DIR}/port/stack_trace.cc" + "${ROCKSDB_SOURCE_DIR}/table/adaptive/adaptive_table_factory.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/binary_search_index_reader.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/block.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_filter_block.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_builder.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_factory.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_iterator.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_reader.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/block_builder.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefetcher.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefix_index.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_hash_index.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_footer.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/filter_block_reader_common.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/filter_policy.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/flush_block_policy.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/full_filter_block.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/hash_index_reader.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/index_builder.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/index_reader_common.cc" + 
"${ROCKSDB_SOURCE_DIR}/table/block_based/parsed_full_filter_block.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_filter_block.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_iterator.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_reader.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/reader_common.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/uncompression_dict_reader.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_fetcher.cc" + "${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_builder.cc" + "${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_factory.cc" + "${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_reader.cc" + "${ROCKSDB_SOURCE_DIR}/table/format.cc" + "${ROCKSDB_SOURCE_DIR}/table/get_context.cc" + "${ROCKSDB_SOURCE_DIR}/table/iterator.cc" + "${ROCKSDB_SOURCE_DIR}/table/merging_iterator.cc" + "${ROCKSDB_SOURCE_DIR}/table/meta_blocks.cc" + "${ROCKSDB_SOURCE_DIR}/table/persistent_cache_helper.cc" + "${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_bloom.cc" + "${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_builder.cc" + "${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_factory.cc" + "${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_index.cc" + "${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_key_coding.cc" + "${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_reader.cc" + "${ROCKSDB_SOURCE_DIR}/table/sst_file_dumper.cc" + "${ROCKSDB_SOURCE_DIR}/table/sst_file_reader.cc" + "${ROCKSDB_SOURCE_DIR}/table/sst_file_writer.cc" + "${ROCKSDB_SOURCE_DIR}/table/table_factory.cc" + "${ROCKSDB_SOURCE_DIR}/table/table_properties.cc" + "${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc" + "${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc" + "${ROCKSDB_SOURCE_DIR}/test_util/sync_point_impl.cc" + "${ROCKSDB_SOURCE_DIR}/test_util/testutil.cc" + "${ROCKSDB_SOURCE_DIR}/test_util/transaction_test_util.cc" + "${ROCKSDB_SOURCE_DIR}/tools/block_cache_analyzer/block_cache_trace_analyzer.cc" + "${ROCKSDB_SOURCE_DIR}/tools/dump/db_dump_tool.cc" + "${ROCKSDB_SOURCE_DIR}/tools/io_tracer_parser_tool.cc" + "${ROCKSDB_SOURCE_DIR}/tools/ldb_cmd.cc" + "${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc" + "${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc" + "${ROCKSDB_SOURCE_DIR}/tools/trace_analyzer_tool.cc" + "${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc" + "${ROCKSDB_SOURCE_DIR}/trace_replay/block_cache_tracer.cc" + "${ROCKSDB_SOURCE_DIR}/trace_replay/io_tracer.cc" + "${ROCKSDB_SOURCE_DIR}/util/coding.cc" + "${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc" + "${ROCKSDB_SOURCE_DIR}/util/comparator.cc" + "${ROCKSDB_SOURCE_DIR}/util/compression_context_cache.cc" + "${ROCKSDB_SOURCE_DIR}/util/concurrent_task_limiter_impl.cc" + "${ROCKSDB_SOURCE_DIR}/util/crc32c.cc" + "${ROCKSDB_SOURCE_DIR}/util/dynamic_bloom.cc" + "${ROCKSDB_SOURCE_DIR}/util/hash.cc" + "${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc" + "${ROCKSDB_SOURCE_DIR}/util/random.cc" + "${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc" + "${ROCKSDB_SOURCE_DIR}/util/slice.cc" + "${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc" + "${ROCKSDB_SOURCE_DIR}/util/status.cc" + "${ROCKSDB_SOURCE_DIR}/util/string_util.cc" + "${ROCKSDB_SOURCE_DIR}/util/thread_local.cc" + "${ROCKSDB_SOURCE_DIR}/util/threadpool_imp.cc" + "${ROCKSDB_SOURCE_DIR}/util/xxhash.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/backupable/backupable_db.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_compaction_filter.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl.cc" + 
"${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl_filesnapshot.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_dump_tool.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_file.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/cassandra/cassandra_compaction_filter.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/cassandra/format.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/cassandra/merge_operator.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/debug.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_env.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_fs.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/leveldb_options/leveldb_options.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/memory/memory_util.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/bytesxor.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/max.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/put.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/sortlist.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend2.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/uint64add.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/object_registry.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/option_change_migration/option_change_migration.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/options/options_util.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_file.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_metadata.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/persistent_cache_tier.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/volatile_tier_impl.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/cache_simulator.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_manager.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_tracker.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_manager.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction_db_impl.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction_db.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/snapshot_checker.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_base.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_db_mutex_impl.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_util.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc" + 
"${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index_internal.cc" $) if(HAVE_SSE42 AND NOT MSVC) set_source_files_properties( - ${ROCKSDB_SOURCE_DIR}/util/crc32c.cc + "${ROCKSDB_SOURCE_DIR}/util/crc32c.cc" PROPERTIES COMPILE_FLAGS "-msse4.2 -mpclmul") endif() if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64") list(APPEND SOURCES - ${ROCKSDB_SOURCE_DIR}/util/crc32c_ppc.c - ${ROCKSDB_SOURCE_DIR}/util/crc32c_ppc_asm.S) + "${ROCKSDB_SOURCE_DIR}/util/crc32c_ppc.c" + "${ROCKSDB_SOURCE_DIR}/util/crc32c_ppc_asm.S") endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64") if(HAS_ARMV8_CRC) list(APPEND SOURCES - ${ROCKSDB_SOURCE_DIR}/util/crc32c_arm64.cc) + "${ROCKSDB_SOURCE_DIR}/util/crc32c_arm64.cc") endif(HAS_ARMV8_CRC) -if(WIN32) - list(APPEND SOURCES - ${ROCKSDB_SOURCE_DIR}/port/win/io_win.cc - ${ROCKSDB_SOURCE_DIR}/port/win/env_win.cc - ${ROCKSDB_SOURCE_DIR}/port/win/env_default.cc - ${ROCKSDB_SOURCE_DIR}/port/win/port_win.cc - ${ROCKSDB_SOURCE_DIR}/port/win/win_logger.cc) - if(NOT MINGW) - # Mingw only supports std::thread when using - # posix threads. - list(APPEND SOURCES - ${ROCKSDB_SOURCE_DIR}/port/win/win_thread.cc) - endif() -if(WITH_XPRESS) - list(APPEND SOURCES - ${ROCKSDB_SOURCE_DIR}/port/win/xpress_win.cc) -endif() - -if(WITH_JEMALLOC) - list(APPEND SOURCES - ${ROCKSDB_SOURCE_DIR}/port/win/win_jemalloc.cc) -endif() - -else() - list(APPEND SOURCES - ${ROCKSDB_SOURCE_DIR}/port/port_posix.cc - ${ROCKSDB_SOURCE_DIR}/env/env_posix.cc - ${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc - ${ROCKSDB_SOURCE_DIR}/env/io_posix.cc) -endif() +list(APPEND SOURCES + "${ROCKSDB_SOURCE_DIR}/port/port_posix.cc" + "${ROCKSDB_SOURCE_DIR}/env/env_posix.cc" + "${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc" + "${ROCKSDB_SOURCE_DIR}/env/io_posix.cc") if(WITH_FOLLY_DISTRIBUTED_MUTEX) list(APPEND SOURCES - ${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/detail/Futex.cpp - ${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/AtomicNotification.cpp - ${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/DistributedMutex.cpp - ${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/ParkingLot.cpp - ${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/WaitOptions.cpp) + "${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/detail/Futex.cpp" + "${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/AtomicNotification.cpp" + "${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/DistributedMutex.cpp" + "${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/ParkingLot.cpp" + "${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/WaitOptions.cpp") endif() set(ROCKSDB_STATIC_LIB rocksdb) -if(WIN32) - set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib) -else() - set(SYSTEM_LIBS ${CMAKE_THREAD_LIBS_INIT}) -endif() - add_library(${ROCKSDB_STATIC_LIB} STATIC ${SOURCES}) target_link_libraries(${ROCKSDB_STATIC_LIB} PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS}) diff --git a/contrib/rocksdb-cmake/rocksdb_build_version.cc b/contrib/rocksdb-cmake/rocksdb_build_version.cc new file mode 100644 index 00000000000..8697652ae9f --- /dev/null +++ b/contrib/rocksdb-cmake/rocksdb_build_version.cc @@ -0,0 +1,3 @@ +const char* rocksdb_build_git_sha = "rocksdb_build_git_sha:0"; +const char* rocksdb_build_git_date = "rocksdb_build_git_date:2000-01-01"; +const char* rocksdb_build_compile_date = "2000-01-01"; diff --git a/contrib/simdjson b/contrib/simdjson index 95b4870e20b..8df32cea335 160000 
--- a/contrib/simdjson +++ b/contrib/simdjson @@ -1 +1 @@ -Subproject commit 95b4870e20be5f97d9dcf63b23b1c6f520c366c1 +Subproject commit 8df32cea3359cb30120795da6020b3b73da01d38 diff --git a/contrib/simdjson-cmake/CMakeLists.txt b/contrib/simdjson-cmake/CMakeLists.txt index 2fb60b905da..d3bcf6c046c 100644 --- a/contrib/simdjson-cmake/CMakeLists.txt +++ b/contrib/simdjson-cmake/CMakeLists.txt @@ -1,6 +1,6 @@ set(SIMDJSON_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/simdjson/include") set(SIMDJSON_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/simdjson/src") -set(SIMDJSON_SRC ${SIMDJSON_SRC_DIR}/simdjson.cpp) +set(SIMDJSON_SRC "${SIMDJSON_SRC_DIR}/simdjson.cpp") add_library(simdjson ${SIMDJSON_SRC}) target_include_directories(simdjson SYSTEM PUBLIC "${SIMDJSON_INCLUDE_DIR}" PRIVATE "${SIMDJSON_SRC_DIR}") diff --git a/contrib/stats-cmake/CMakeLists.txt b/contrib/stats-cmake/CMakeLists.txt index a159e85a0e3..8279e49c3f0 100644 --- a/contrib/stats-cmake/CMakeLists.txt +++ b/contrib/stats-cmake/CMakeLists.txt @@ -1,7 +1,7 @@ # The stats is a header-only library of probability density functions, # cumulative distribution functions, quantile functions, and random sampling methods. -set(STATS_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/stats/include) -set(GCEM_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/gcem/include) +set(STATS_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/stats/include") +set(GCEM_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/gcem/include") add_library(stats INTERFACE) diff --git a/contrib/unixodbc-cmake/CMakeLists.txt b/contrib/unixodbc-cmake/CMakeLists.txt index c971c4bdd89..c154533739c 100644 --- a/contrib/unixodbc-cmake/CMakeLists.txt +++ b/contrib/unixodbc-cmake/CMakeLists.txt @@ -2,7 +2,7 @@ if (NOT USE_INTERNAL_ODBC_LIBRARY) return() endif() -set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/unixodbc) +set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/unixodbc") # ltdl @@ -10,14 +10,14 @@ set (SRCS_LTDL # This file is generated by 'libtool' inside libltdl directory and then removed. 
linux_x86_64/libltdl/libltdlcS.c - ${LIBRARY_DIR}/libltdl/lt__alloc.c - ${LIBRARY_DIR}/libltdl/lt__strl.c - ${LIBRARY_DIR}/libltdl/ltdl.c - ${LIBRARY_DIR}/libltdl/lt_dlloader.c - ${LIBRARY_DIR}/libltdl/slist.c - ${LIBRARY_DIR}/libltdl/lt_error.c - ${LIBRARY_DIR}/libltdl/loaders/dlopen.c - ${LIBRARY_DIR}/libltdl/loaders/preopen.c + "${LIBRARY_DIR}/libltdl/lt__alloc.c" + "${LIBRARY_DIR}/libltdl/lt__strl.c" + "${LIBRARY_DIR}/libltdl/ltdl.c" + "${LIBRARY_DIR}/libltdl/lt_dlloader.c" + "${LIBRARY_DIR}/libltdl/slist.c" + "${LIBRARY_DIR}/libltdl/lt_error.c" + "${LIBRARY_DIR}/libltdl/loaders/dlopen.c" + "${LIBRARY_DIR}/libltdl/loaders/preopen.c" ) add_library (ltdl ${SRCS_LTDL}) @@ -26,8 +26,8 @@ target_include_directories(ltdl PRIVATE linux_x86_64/libltdl PUBLIC - ${LIBRARY_DIR}/libltdl - ${LIBRARY_DIR}/libltdl/libltdl + "${LIBRARY_DIR}/libltdl" + "${LIBRARY_DIR}/libltdl/libltdl" ) target_compile_definitions(ltdl PRIVATE -DHAVE_CONFIG_H -DLTDL -DLTDLOPEN=libltdlc) target_compile_options(ltdl PRIVATE -Wno-constant-logical-operand -Wno-unknown-warning-option -O2) @@ -35,238 +35,238 @@ target_compile_options(ltdl PRIVATE -Wno-constant-logical-operand -Wno-unknown-w # odbc set (SRCS - ${LIBRARY_DIR}/DriverManager/__attribute.c - ${LIBRARY_DIR}/DriverManager/__connection.c - ${LIBRARY_DIR}/DriverManager/__handles.c - ${LIBRARY_DIR}/DriverManager/__info.c - ${LIBRARY_DIR}/DriverManager/__stats.c - ${LIBRARY_DIR}/DriverManager/SQLAllocConnect.c - ${LIBRARY_DIR}/DriverManager/SQLAllocEnv.c - ${LIBRARY_DIR}/DriverManager/SQLAllocHandle.c - ${LIBRARY_DIR}/DriverManager/SQLAllocHandleStd.c - ${LIBRARY_DIR}/DriverManager/SQLAllocStmt.c - ${LIBRARY_DIR}/DriverManager/SQLBindCol.c - ${LIBRARY_DIR}/DriverManager/SQLBindParam.c - ${LIBRARY_DIR}/DriverManager/SQLBindParameter.c - ${LIBRARY_DIR}/DriverManager/SQLBrowseConnect.c - ${LIBRARY_DIR}/DriverManager/SQLBrowseConnectW.c - ${LIBRARY_DIR}/DriverManager/SQLBulkOperations.c - ${LIBRARY_DIR}/DriverManager/SQLCancel.c - ${LIBRARY_DIR}/DriverManager/SQLCancelHandle.c - ${LIBRARY_DIR}/DriverManager/SQLCloseCursor.c - ${LIBRARY_DIR}/DriverManager/SQLColAttribute.c - ${LIBRARY_DIR}/DriverManager/SQLColAttributes.c - ${LIBRARY_DIR}/DriverManager/SQLColAttributesW.c - ${LIBRARY_DIR}/DriverManager/SQLColAttributeW.c - ${LIBRARY_DIR}/DriverManager/SQLColumnPrivileges.c - ${LIBRARY_DIR}/DriverManager/SQLColumnPrivilegesW.c - ${LIBRARY_DIR}/DriverManager/SQLColumns.c - ${LIBRARY_DIR}/DriverManager/SQLColumnsW.c - ${LIBRARY_DIR}/DriverManager/SQLConnect.c - ${LIBRARY_DIR}/DriverManager/SQLConnectW.c - ${LIBRARY_DIR}/DriverManager/SQLCopyDesc.c - ${LIBRARY_DIR}/DriverManager/SQLDataSources.c - ${LIBRARY_DIR}/DriverManager/SQLDataSourcesW.c - ${LIBRARY_DIR}/DriverManager/SQLDescribeCol.c - ${LIBRARY_DIR}/DriverManager/SQLDescribeColW.c - ${LIBRARY_DIR}/DriverManager/SQLDescribeParam.c - ${LIBRARY_DIR}/DriverManager/SQLDisconnect.c - ${LIBRARY_DIR}/DriverManager/SQLDriverConnect.c - ${LIBRARY_DIR}/DriverManager/SQLDriverConnectW.c - ${LIBRARY_DIR}/DriverManager/SQLDrivers.c - ${LIBRARY_DIR}/DriverManager/SQLDriversW.c - ${LIBRARY_DIR}/DriverManager/SQLEndTran.c - ${LIBRARY_DIR}/DriverManager/SQLError.c - ${LIBRARY_DIR}/DriverManager/SQLErrorW.c - ${LIBRARY_DIR}/DriverManager/SQLExecDirect.c - ${LIBRARY_DIR}/DriverManager/SQLExecDirectW.c - ${LIBRARY_DIR}/DriverManager/SQLExecute.c - ${LIBRARY_DIR}/DriverManager/SQLExtendedFetch.c - ${LIBRARY_DIR}/DriverManager/SQLFetch.c - ${LIBRARY_DIR}/DriverManager/SQLFetchScroll.c - ${LIBRARY_DIR}/DriverManager/SQLForeignKeys.c - 
${LIBRARY_DIR}/DriverManager/SQLForeignKeysW.c - ${LIBRARY_DIR}/DriverManager/SQLFreeConnect.c - ${LIBRARY_DIR}/DriverManager/SQLFreeEnv.c - ${LIBRARY_DIR}/DriverManager/SQLFreeHandle.c - ${LIBRARY_DIR}/DriverManager/SQLFreeStmt.c - ${LIBRARY_DIR}/DriverManager/SQLGetConnectAttr.c - ${LIBRARY_DIR}/DriverManager/SQLGetConnectAttrW.c - ${LIBRARY_DIR}/DriverManager/SQLGetConnectOption.c - ${LIBRARY_DIR}/DriverManager/SQLGetConnectOptionW.c - ${LIBRARY_DIR}/DriverManager/SQLGetCursorName.c - ${LIBRARY_DIR}/DriverManager/SQLGetCursorNameW.c - ${LIBRARY_DIR}/DriverManager/SQLGetData.c - ${LIBRARY_DIR}/DriverManager/SQLGetDescField.c - ${LIBRARY_DIR}/DriverManager/SQLGetDescFieldW.c - ${LIBRARY_DIR}/DriverManager/SQLGetDescRec.c - ${LIBRARY_DIR}/DriverManager/SQLGetDescRecW.c - ${LIBRARY_DIR}/DriverManager/SQLGetDiagField.c - ${LIBRARY_DIR}/DriverManager/SQLGetDiagFieldW.c - ${LIBRARY_DIR}/DriverManager/SQLGetDiagRec.c - ${LIBRARY_DIR}/DriverManager/SQLGetDiagRecW.c - ${LIBRARY_DIR}/DriverManager/SQLGetEnvAttr.c - ${LIBRARY_DIR}/DriverManager/SQLGetFunctions.c - ${LIBRARY_DIR}/DriverManager/SQLGetInfo.c - ${LIBRARY_DIR}/DriverManager/SQLGetInfoW.c - ${LIBRARY_DIR}/DriverManager/SQLGetStmtAttr.c - ${LIBRARY_DIR}/DriverManager/SQLGetStmtAttrW.c - ${LIBRARY_DIR}/DriverManager/SQLGetStmtOption.c - ${LIBRARY_DIR}/DriverManager/SQLGetTypeInfo.c - ${LIBRARY_DIR}/DriverManager/SQLGetTypeInfoW.c - ${LIBRARY_DIR}/DriverManager/SQLMoreResults.c - ${LIBRARY_DIR}/DriverManager/SQLNativeSql.c - ${LIBRARY_DIR}/DriverManager/SQLNativeSqlW.c - ${LIBRARY_DIR}/DriverManager/SQLNumParams.c - ${LIBRARY_DIR}/DriverManager/SQLNumResultCols.c - ${LIBRARY_DIR}/DriverManager/SQLParamData.c - ${LIBRARY_DIR}/DriverManager/SQLParamOptions.c - ${LIBRARY_DIR}/DriverManager/SQLPrepare.c - ${LIBRARY_DIR}/DriverManager/SQLPrepareW.c - ${LIBRARY_DIR}/DriverManager/SQLPrimaryKeys.c - ${LIBRARY_DIR}/DriverManager/SQLPrimaryKeysW.c - ${LIBRARY_DIR}/DriverManager/SQLProcedureColumns.c - ${LIBRARY_DIR}/DriverManager/SQLProcedureColumnsW.c - ${LIBRARY_DIR}/DriverManager/SQLProcedures.c - ${LIBRARY_DIR}/DriverManager/SQLProceduresW.c - ${LIBRARY_DIR}/DriverManager/SQLPutData.c - ${LIBRARY_DIR}/DriverManager/SQLRowCount.c - ${LIBRARY_DIR}/DriverManager/SQLSetConnectAttr.c - ${LIBRARY_DIR}/DriverManager/SQLSetConnectAttrW.c - ${LIBRARY_DIR}/DriverManager/SQLSetConnectOption.c - ${LIBRARY_DIR}/DriverManager/SQLSetConnectOptionW.c - ${LIBRARY_DIR}/DriverManager/SQLSetCursorName.c - ${LIBRARY_DIR}/DriverManager/SQLSetCursorNameW.c - ${LIBRARY_DIR}/DriverManager/SQLSetDescField.c - ${LIBRARY_DIR}/DriverManager/SQLSetDescFieldW.c - ${LIBRARY_DIR}/DriverManager/SQLSetDescRec.c - ${LIBRARY_DIR}/DriverManager/SQLSetEnvAttr.c - ${LIBRARY_DIR}/DriverManager/SQLSetParam.c - ${LIBRARY_DIR}/DriverManager/SQLSetPos.c - ${LIBRARY_DIR}/DriverManager/SQLSetScrollOptions.c - ${LIBRARY_DIR}/DriverManager/SQLSetStmtAttr.c - ${LIBRARY_DIR}/DriverManager/SQLSetStmtAttrW.c - ${LIBRARY_DIR}/DriverManager/SQLSetStmtOption.c - ${LIBRARY_DIR}/DriverManager/SQLSetStmtOptionW.c - ${LIBRARY_DIR}/DriverManager/SQLSpecialColumns.c - ${LIBRARY_DIR}/DriverManager/SQLSpecialColumnsW.c - ${LIBRARY_DIR}/DriverManager/SQLStatistics.c - ${LIBRARY_DIR}/DriverManager/SQLStatisticsW.c - ${LIBRARY_DIR}/DriverManager/SQLTablePrivileges.c - ${LIBRARY_DIR}/DriverManager/SQLTablePrivilegesW.c - ${LIBRARY_DIR}/DriverManager/SQLTables.c - ${LIBRARY_DIR}/DriverManager/SQLTablesW.c - ${LIBRARY_DIR}/DriverManager/SQLTransact.c - ${LIBRARY_DIR}/ini/_iniDump.c - 
${LIBRARY_DIR}/ini/_iniObjectRead.c - ${LIBRARY_DIR}/ini/_iniPropertyRead.c - ${LIBRARY_DIR}/ini/_iniScanUntilObject.c - ${LIBRARY_DIR}/ini/iniAllTrim.c - ${LIBRARY_DIR}/ini/iniAppend.c - ${LIBRARY_DIR}/ini/iniClose.c - ${LIBRARY_DIR}/ini/iniCommit.c - ${LIBRARY_DIR}/ini/iniCursor.c - ${LIBRARY_DIR}/ini/iniDelete.c - ${LIBRARY_DIR}/ini/iniElement.c - ${LIBRARY_DIR}/ini/iniElementCount.c - ${LIBRARY_DIR}/ini/iniGetBookmark.c - ${LIBRARY_DIR}/ini/iniGotoBookmark.c - ${LIBRARY_DIR}/ini/iniObject.c - ${LIBRARY_DIR}/ini/iniObjectDelete.c - ${LIBRARY_DIR}/ini/iniObjectEOL.c - ${LIBRARY_DIR}/ini/iniObjectFirst.c - ${LIBRARY_DIR}/ini/iniObjectInsert.c - ${LIBRARY_DIR}/ini/iniObjectLast.c - ${LIBRARY_DIR}/ini/iniObjectNext.c - ${LIBRARY_DIR}/ini/iniObjectSeek.c - ${LIBRARY_DIR}/ini/iniObjectSeekSure.c - ${LIBRARY_DIR}/ini/iniObjectUpdate.c - ${LIBRARY_DIR}/ini/iniOpen.c - ${LIBRARY_DIR}/ini/iniProperty.c - ${LIBRARY_DIR}/ini/iniPropertyDelete.c - ${LIBRARY_DIR}/ini/iniPropertyEOL.c - ${LIBRARY_DIR}/ini/iniPropertyFirst.c - ${LIBRARY_DIR}/ini/iniPropertyInsert.c - ${LIBRARY_DIR}/ini/iniPropertyLast.c - ${LIBRARY_DIR}/ini/iniPropertyNext.c - ${LIBRARY_DIR}/ini/iniPropertySeek.c - ${LIBRARY_DIR}/ini/iniPropertySeekSure.c - ${LIBRARY_DIR}/ini/iniPropertyUpdate.c - ${LIBRARY_DIR}/ini/iniPropertyValue.c - ${LIBRARY_DIR}/ini/iniToUpper.c - ${LIBRARY_DIR}/ini/iniValue.c - ${LIBRARY_DIR}/log/_logFreeMsg.c - ${LIBRARY_DIR}/log/logClear.c - ${LIBRARY_DIR}/log/logClose.c - ${LIBRARY_DIR}/log/logOn.c - ${LIBRARY_DIR}/log/logOpen.c - ${LIBRARY_DIR}/log/logPeekMsg.c - ${LIBRARY_DIR}/log/logPopMsg.c - ${LIBRARY_DIR}/log/logPushMsg.c - ${LIBRARY_DIR}/lst/_lstAdjustCurrent.c - ${LIBRARY_DIR}/lst/_lstDump.c - ${LIBRARY_DIR}/lst/_lstFreeItem.c - ${LIBRARY_DIR}/lst/_lstNextValidItem.c - ${LIBRARY_DIR}/lst/_lstPrevValidItem.c - ${LIBRARY_DIR}/lst/_lstVisible.c - ${LIBRARY_DIR}/lst/lstAppend.c - ${LIBRARY_DIR}/lst/lstClose.c - ${LIBRARY_DIR}/lst/lstDelete.c - ${LIBRARY_DIR}/lst/lstEOL.c - ${LIBRARY_DIR}/lst/lstFirst.c - ${LIBRARY_DIR}/lst/lstGet.c - ${LIBRARY_DIR}/lst/lstGetBookMark.c - ${LIBRARY_DIR}/lst/lstGoto.c - ${LIBRARY_DIR}/lst/lstGotoBookMark.c - ${LIBRARY_DIR}/lst/lstInsert.c - ${LIBRARY_DIR}/lst/lstLast.c - ${LIBRARY_DIR}/lst/lstNext.c - ${LIBRARY_DIR}/lst/lstOpen.c - ${LIBRARY_DIR}/lst/lstOpenCursor.c - ${LIBRARY_DIR}/lst/lstPrev.c - ${LIBRARY_DIR}/lst/lstSeek.c - ${LIBRARY_DIR}/lst/lstSeekItem.c - ${LIBRARY_DIR}/lst/lstSet.c - ${LIBRARY_DIR}/lst/lstSetFreeFunc.c - ${LIBRARY_DIR}/odbcinst/_logging.c - ${LIBRARY_DIR}/odbcinst/_odbcinst_ConfigModeINI.c - ${LIBRARY_DIR}/odbcinst/_odbcinst_GetEntries.c - ${LIBRARY_DIR}/odbcinst/_odbcinst_GetSections.c - ${LIBRARY_DIR}/odbcinst/_odbcinst_SystemINI.c - ${LIBRARY_DIR}/odbcinst/_odbcinst_UserINI.c - ${LIBRARY_DIR}/odbcinst/_SQLDriverConnectPrompt.c - ${LIBRARY_DIR}/odbcinst/_SQLGetInstalledDrivers.c - ${LIBRARY_DIR}/odbcinst/_SQLWriteInstalledDrivers.c - ${LIBRARY_DIR}/odbcinst/ODBCINSTConstructProperties.c - ${LIBRARY_DIR}/odbcinst/ODBCINSTDestructProperties.c - ${LIBRARY_DIR}/odbcinst/ODBCINSTSetProperty.c - ${LIBRARY_DIR}/odbcinst/ODBCINSTValidateProperties.c - ${LIBRARY_DIR}/odbcinst/ODBCINSTValidateProperty.c - ${LIBRARY_DIR}/odbcinst/SQLConfigDataSource.c - ${LIBRARY_DIR}/odbcinst/SQLConfigDriver.c - ${LIBRARY_DIR}/odbcinst/SQLCreateDataSource.c - ${LIBRARY_DIR}/odbcinst/SQLGetAvailableDrivers.c - ${LIBRARY_DIR}/odbcinst/SQLGetConfigMode.c - ${LIBRARY_DIR}/odbcinst/SQLGetInstalledDrivers.c - ${LIBRARY_DIR}/odbcinst/SQLGetPrivateProfileString.c - 
${LIBRARY_DIR}/odbcinst/SQLGetTranslator.c - ${LIBRARY_DIR}/odbcinst/SQLInstallDriverEx.c - ${LIBRARY_DIR}/odbcinst/SQLInstallDriverManager.c - ${LIBRARY_DIR}/odbcinst/SQLInstallerError.c - ${LIBRARY_DIR}/odbcinst/SQLInstallODBC.c - ${LIBRARY_DIR}/odbcinst/SQLInstallTranslatorEx.c - ${LIBRARY_DIR}/odbcinst/SQLManageDataSources.c - ${LIBRARY_DIR}/odbcinst/SQLPostInstallerError.c - ${LIBRARY_DIR}/odbcinst/SQLReadFileDSN.c - ${LIBRARY_DIR}/odbcinst/SQLRemoveDriver.c - ${LIBRARY_DIR}/odbcinst/SQLRemoveDriverManager.c - ${LIBRARY_DIR}/odbcinst/SQLRemoveDSNFromIni.c - ${LIBRARY_DIR}/odbcinst/SQLRemoveTranslator.c - ${LIBRARY_DIR}/odbcinst/SQLSetConfigMode.c - ${LIBRARY_DIR}/odbcinst/SQLValidDSN.c - ${LIBRARY_DIR}/odbcinst/SQLWriteDSNToIni.c - ${LIBRARY_DIR}/odbcinst/SQLWriteFileDSN.c - ${LIBRARY_DIR}/odbcinst/SQLWritePrivateProfileString.c + "${LIBRARY_DIR}/DriverManager/__attribute.c" + "${LIBRARY_DIR}/DriverManager/__connection.c" + "${LIBRARY_DIR}/DriverManager/__handles.c" + "${LIBRARY_DIR}/DriverManager/__info.c" + "${LIBRARY_DIR}/DriverManager/__stats.c" + "${LIBRARY_DIR}/DriverManager/SQLAllocConnect.c" + "${LIBRARY_DIR}/DriverManager/SQLAllocEnv.c" + "${LIBRARY_DIR}/DriverManager/SQLAllocHandle.c" + "${LIBRARY_DIR}/DriverManager/SQLAllocHandleStd.c" + "${LIBRARY_DIR}/DriverManager/SQLAllocStmt.c" + "${LIBRARY_DIR}/DriverManager/SQLBindCol.c" + "${LIBRARY_DIR}/DriverManager/SQLBindParam.c" + "${LIBRARY_DIR}/DriverManager/SQLBindParameter.c" + "${LIBRARY_DIR}/DriverManager/SQLBrowseConnect.c" + "${LIBRARY_DIR}/DriverManager/SQLBrowseConnectW.c" + "${LIBRARY_DIR}/DriverManager/SQLBulkOperations.c" + "${LIBRARY_DIR}/DriverManager/SQLCancel.c" + "${LIBRARY_DIR}/DriverManager/SQLCancelHandle.c" + "${LIBRARY_DIR}/DriverManager/SQLCloseCursor.c" + "${LIBRARY_DIR}/DriverManager/SQLColAttribute.c" + "${LIBRARY_DIR}/DriverManager/SQLColAttributes.c" + "${LIBRARY_DIR}/DriverManager/SQLColAttributesW.c" + "${LIBRARY_DIR}/DriverManager/SQLColAttributeW.c" + "${LIBRARY_DIR}/DriverManager/SQLColumnPrivileges.c" + "${LIBRARY_DIR}/DriverManager/SQLColumnPrivilegesW.c" + "${LIBRARY_DIR}/DriverManager/SQLColumns.c" + "${LIBRARY_DIR}/DriverManager/SQLColumnsW.c" + "${LIBRARY_DIR}/DriverManager/SQLConnect.c" + "${LIBRARY_DIR}/DriverManager/SQLConnectW.c" + "${LIBRARY_DIR}/DriverManager/SQLCopyDesc.c" + "${LIBRARY_DIR}/DriverManager/SQLDataSources.c" + "${LIBRARY_DIR}/DriverManager/SQLDataSourcesW.c" + "${LIBRARY_DIR}/DriverManager/SQLDescribeCol.c" + "${LIBRARY_DIR}/DriverManager/SQLDescribeColW.c" + "${LIBRARY_DIR}/DriverManager/SQLDescribeParam.c" + "${LIBRARY_DIR}/DriverManager/SQLDisconnect.c" + "${LIBRARY_DIR}/DriverManager/SQLDriverConnect.c" + "${LIBRARY_DIR}/DriverManager/SQLDriverConnectW.c" + "${LIBRARY_DIR}/DriverManager/SQLDrivers.c" + "${LIBRARY_DIR}/DriverManager/SQLDriversW.c" + "${LIBRARY_DIR}/DriverManager/SQLEndTran.c" + "${LIBRARY_DIR}/DriverManager/SQLError.c" + "${LIBRARY_DIR}/DriverManager/SQLErrorW.c" + "${LIBRARY_DIR}/DriverManager/SQLExecDirect.c" + "${LIBRARY_DIR}/DriverManager/SQLExecDirectW.c" + "${LIBRARY_DIR}/DriverManager/SQLExecute.c" + "${LIBRARY_DIR}/DriverManager/SQLExtendedFetch.c" + "${LIBRARY_DIR}/DriverManager/SQLFetch.c" + "${LIBRARY_DIR}/DriverManager/SQLFetchScroll.c" + "${LIBRARY_DIR}/DriverManager/SQLForeignKeys.c" + "${LIBRARY_DIR}/DriverManager/SQLForeignKeysW.c" + "${LIBRARY_DIR}/DriverManager/SQLFreeConnect.c" + "${LIBRARY_DIR}/DriverManager/SQLFreeEnv.c" + "${LIBRARY_DIR}/DriverManager/SQLFreeHandle.c" + "${LIBRARY_DIR}/DriverManager/SQLFreeStmt.c" + 
"${LIBRARY_DIR}/DriverManager/SQLGetConnectAttr.c" + "${LIBRARY_DIR}/DriverManager/SQLGetConnectAttrW.c" + "${LIBRARY_DIR}/DriverManager/SQLGetConnectOption.c" + "${LIBRARY_DIR}/DriverManager/SQLGetConnectOptionW.c" + "${LIBRARY_DIR}/DriverManager/SQLGetCursorName.c" + "${LIBRARY_DIR}/DriverManager/SQLGetCursorNameW.c" + "${LIBRARY_DIR}/DriverManager/SQLGetData.c" + "${LIBRARY_DIR}/DriverManager/SQLGetDescField.c" + "${LIBRARY_DIR}/DriverManager/SQLGetDescFieldW.c" + "${LIBRARY_DIR}/DriverManager/SQLGetDescRec.c" + "${LIBRARY_DIR}/DriverManager/SQLGetDescRecW.c" + "${LIBRARY_DIR}/DriverManager/SQLGetDiagField.c" + "${LIBRARY_DIR}/DriverManager/SQLGetDiagFieldW.c" + "${LIBRARY_DIR}/DriverManager/SQLGetDiagRec.c" + "${LIBRARY_DIR}/DriverManager/SQLGetDiagRecW.c" + "${LIBRARY_DIR}/DriverManager/SQLGetEnvAttr.c" + "${LIBRARY_DIR}/DriverManager/SQLGetFunctions.c" + "${LIBRARY_DIR}/DriverManager/SQLGetInfo.c" + "${LIBRARY_DIR}/DriverManager/SQLGetInfoW.c" + "${LIBRARY_DIR}/DriverManager/SQLGetStmtAttr.c" + "${LIBRARY_DIR}/DriverManager/SQLGetStmtAttrW.c" + "${LIBRARY_DIR}/DriverManager/SQLGetStmtOption.c" + "${LIBRARY_DIR}/DriverManager/SQLGetTypeInfo.c" + "${LIBRARY_DIR}/DriverManager/SQLGetTypeInfoW.c" + "${LIBRARY_DIR}/DriverManager/SQLMoreResults.c" + "${LIBRARY_DIR}/DriverManager/SQLNativeSql.c" + "${LIBRARY_DIR}/DriverManager/SQLNativeSqlW.c" + "${LIBRARY_DIR}/DriverManager/SQLNumParams.c" + "${LIBRARY_DIR}/DriverManager/SQLNumResultCols.c" + "${LIBRARY_DIR}/DriverManager/SQLParamData.c" + "${LIBRARY_DIR}/DriverManager/SQLParamOptions.c" + "${LIBRARY_DIR}/DriverManager/SQLPrepare.c" + "${LIBRARY_DIR}/DriverManager/SQLPrepareW.c" + "${LIBRARY_DIR}/DriverManager/SQLPrimaryKeys.c" + "${LIBRARY_DIR}/DriverManager/SQLPrimaryKeysW.c" + "${LIBRARY_DIR}/DriverManager/SQLProcedureColumns.c" + "${LIBRARY_DIR}/DriverManager/SQLProcedureColumnsW.c" + "${LIBRARY_DIR}/DriverManager/SQLProcedures.c" + "${LIBRARY_DIR}/DriverManager/SQLProceduresW.c" + "${LIBRARY_DIR}/DriverManager/SQLPutData.c" + "${LIBRARY_DIR}/DriverManager/SQLRowCount.c" + "${LIBRARY_DIR}/DriverManager/SQLSetConnectAttr.c" + "${LIBRARY_DIR}/DriverManager/SQLSetConnectAttrW.c" + "${LIBRARY_DIR}/DriverManager/SQLSetConnectOption.c" + "${LIBRARY_DIR}/DriverManager/SQLSetConnectOptionW.c" + "${LIBRARY_DIR}/DriverManager/SQLSetCursorName.c" + "${LIBRARY_DIR}/DriverManager/SQLSetCursorNameW.c" + "${LIBRARY_DIR}/DriverManager/SQLSetDescField.c" + "${LIBRARY_DIR}/DriverManager/SQLSetDescFieldW.c" + "${LIBRARY_DIR}/DriverManager/SQLSetDescRec.c" + "${LIBRARY_DIR}/DriverManager/SQLSetEnvAttr.c" + "${LIBRARY_DIR}/DriverManager/SQLSetParam.c" + "${LIBRARY_DIR}/DriverManager/SQLSetPos.c" + "${LIBRARY_DIR}/DriverManager/SQLSetScrollOptions.c" + "${LIBRARY_DIR}/DriverManager/SQLSetStmtAttr.c" + "${LIBRARY_DIR}/DriverManager/SQLSetStmtAttrW.c" + "${LIBRARY_DIR}/DriverManager/SQLSetStmtOption.c" + "${LIBRARY_DIR}/DriverManager/SQLSetStmtOptionW.c" + "${LIBRARY_DIR}/DriverManager/SQLSpecialColumns.c" + "${LIBRARY_DIR}/DriverManager/SQLSpecialColumnsW.c" + "${LIBRARY_DIR}/DriverManager/SQLStatistics.c" + "${LIBRARY_DIR}/DriverManager/SQLStatisticsW.c" + "${LIBRARY_DIR}/DriverManager/SQLTablePrivileges.c" + "${LIBRARY_DIR}/DriverManager/SQLTablePrivilegesW.c" + "${LIBRARY_DIR}/DriverManager/SQLTables.c" + "${LIBRARY_DIR}/DriverManager/SQLTablesW.c" + "${LIBRARY_DIR}/DriverManager/SQLTransact.c" + "${LIBRARY_DIR}/ini/_iniDump.c" + "${LIBRARY_DIR}/ini/_iniObjectRead.c" + "${LIBRARY_DIR}/ini/_iniPropertyRead.c" + "${LIBRARY_DIR}/ini/_iniScanUntilObject.c" + 
"${LIBRARY_DIR}/ini/iniAllTrim.c" + "${LIBRARY_DIR}/ini/iniAppend.c" + "${LIBRARY_DIR}/ini/iniClose.c" + "${LIBRARY_DIR}/ini/iniCommit.c" + "${LIBRARY_DIR}/ini/iniCursor.c" + "${LIBRARY_DIR}/ini/iniDelete.c" + "${LIBRARY_DIR}/ini/iniElement.c" + "${LIBRARY_DIR}/ini/iniElementCount.c" + "${LIBRARY_DIR}/ini/iniGetBookmark.c" + "${LIBRARY_DIR}/ini/iniGotoBookmark.c" + "${LIBRARY_DIR}/ini/iniObject.c" + "${LIBRARY_DIR}/ini/iniObjectDelete.c" + "${LIBRARY_DIR}/ini/iniObjectEOL.c" + "${LIBRARY_DIR}/ini/iniObjectFirst.c" + "${LIBRARY_DIR}/ini/iniObjectInsert.c" + "${LIBRARY_DIR}/ini/iniObjectLast.c" + "${LIBRARY_DIR}/ini/iniObjectNext.c" + "${LIBRARY_DIR}/ini/iniObjectSeek.c" + "${LIBRARY_DIR}/ini/iniObjectSeekSure.c" + "${LIBRARY_DIR}/ini/iniObjectUpdate.c" + "${LIBRARY_DIR}/ini/iniOpen.c" + "${LIBRARY_DIR}/ini/iniProperty.c" + "${LIBRARY_DIR}/ini/iniPropertyDelete.c" + "${LIBRARY_DIR}/ini/iniPropertyEOL.c" + "${LIBRARY_DIR}/ini/iniPropertyFirst.c" + "${LIBRARY_DIR}/ini/iniPropertyInsert.c" + "${LIBRARY_DIR}/ini/iniPropertyLast.c" + "${LIBRARY_DIR}/ini/iniPropertyNext.c" + "${LIBRARY_DIR}/ini/iniPropertySeek.c" + "${LIBRARY_DIR}/ini/iniPropertySeekSure.c" + "${LIBRARY_DIR}/ini/iniPropertyUpdate.c" + "${LIBRARY_DIR}/ini/iniPropertyValue.c" + "${LIBRARY_DIR}/ini/iniToUpper.c" + "${LIBRARY_DIR}/ini/iniValue.c" + "${LIBRARY_DIR}/log/_logFreeMsg.c" + "${LIBRARY_DIR}/log/logClear.c" + "${LIBRARY_DIR}/log/logClose.c" + "${LIBRARY_DIR}/log/logOn.c" + "${LIBRARY_DIR}/log/logOpen.c" + "${LIBRARY_DIR}/log/logPeekMsg.c" + "${LIBRARY_DIR}/log/logPopMsg.c" + "${LIBRARY_DIR}/log/logPushMsg.c" + "${LIBRARY_DIR}/lst/_lstAdjustCurrent.c" + "${LIBRARY_DIR}/lst/_lstDump.c" + "${LIBRARY_DIR}/lst/_lstFreeItem.c" + "${LIBRARY_DIR}/lst/_lstNextValidItem.c" + "${LIBRARY_DIR}/lst/_lstPrevValidItem.c" + "${LIBRARY_DIR}/lst/_lstVisible.c" + "${LIBRARY_DIR}/lst/lstAppend.c" + "${LIBRARY_DIR}/lst/lstClose.c" + "${LIBRARY_DIR}/lst/lstDelete.c" + "${LIBRARY_DIR}/lst/lstEOL.c" + "${LIBRARY_DIR}/lst/lstFirst.c" + "${LIBRARY_DIR}/lst/lstGet.c" + "${LIBRARY_DIR}/lst/lstGetBookMark.c" + "${LIBRARY_DIR}/lst/lstGoto.c" + "${LIBRARY_DIR}/lst/lstGotoBookMark.c" + "${LIBRARY_DIR}/lst/lstInsert.c" + "${LIBRARY_DIR}/lst/lstLast.c" + "${LIBRARY_DIR}/lst/lstNext.c" + "${LIBRARY_DIR}/lst/lstOpen.c" + "${LIBRARY_DIR}/lst/lstOpenCursor.c" + "${LIBRARY_DIR}/lst/lstPrev.c" + "${LIBRARY_DIR}/lst/lstSeek.c" + "${LIBRARY_DIR}/lst/lstSeekItem.c" + "${LIBRARY_DIR}/lst/lstSet.c" + "${LIBRARY_DIR}/lst/lstSetFreeFunc.c" + "${LIBRARY_DIR}/odbcinst/_logging.c" + "${LIBRARY_DIR}/odbcinst/_odbcinst_ConfigModeINI.c" + "${LIBRARY_DIR}/odbcinst/_odbcinst_GetEntries.c" + "${LIBRARY_DIR}/odbcinst/_odbcinst_GetSections.c" + "${LIBRARY_DIR}/odbcinst/_odbcinst_SystemINI.c" + "${LIBRARY_DIR}/odbcinst/_odbcinst_UserINI.c" + "${LIBRARY_DIR}/odbcinst/_SQLDriverConnectPrompt.c" + "${LIBRARY_DIR}/odbcinst/_SQLGetInstalledDrivers.c" + "${LIBRARY_DIR}/odbcinst/_SQLWriteInstalledDrivers.c" + "${LIBRARY_DIR}/odbcinst/ODBCINSTConstructProperties.c" + "${LIBRARY_DIR}/odbcinst/ODBCINSTDestructProperties.c" + "${LIBRARY_DIR}/odbcinst/ODBCINSTSetProperty.c" + "${LIBRARY_DIR}/odbcinst/ODBCINSTValidateProperties.c" + "${LIBRARY_DIR}/odbcinst/ODBCINSTValidateProperty.c" + "${LIBRARY_DIR}/odbcinst/SQLConfigDataSource.c" + "${LIBRARY_DIR}/odbcinst/SQLConfigDriver.c" + "${LIBRARY_DIR}/odbcinst/SQLCreateDataSource.c" + "${LIBRARY_DIR}/odbcinst/SQLGetAvailableDrivers.c" + "${LIBRARY_DIR}/odbcinst/SQLGetConfigMode.c" + "${LIBRARY_DIR}/odbcinst/SQLGetInstalledDrivers.c" + 
"${LIBRARY_DIR}/odbcinst/SQLGetPrivateProfileString.c" + "${LIBRARY_DIR}/odbcinst/SQLGetTranslator.c" + "${LIBRARY_DIR}/odbcinst/SQLInstallDriverEx.c" + "${LIBRARY_DIR}/odbcinst/SQLInstallDriverManager.c" + "${LIBRARY_DIR}/odbcinst/SQLInstallerError.c" + "${LIBRARY_DIR}/odbcinst/SQLInstallODBC.c" + "${LIBRARY_DIR}/odbcinst/SQLInstallTranslatorEx.c" + "${LIBRARY_DIR}/odbcinst/SQLManageDataSources.c" + "${LIBRARY_DIR}/odbcinst/SQLPostInstallerError.c" + "${LIBRARY_DIR}/odbcinst/SQLReadFileDSN.c" + "${LIBRARY_DIR}/odbcinst/SQLRemoveDriver.c" + "${LIBRARY_DIR}/odbcinst/SQLRemoveDriverManager.c" + "${LIBRARY_DIR}/odbcinst/SQLRemoveDSNFromIni.c" + "${LIBRARY_DIR}/odbcinst/SQLRemoveTranslator.c" + "${LIBRARY_DIR}/odbcinst/SQLSetConfigMode.c" + "${LIBRARY_DIR}/odbcinst/SQLValidDSN.c" + "${LIBRARY_DIR}/odbcinst/SQLWriteDSNToIni.c" + "${LIBRARY_DIR}/odbcinst/SQLWriteFileDSN.c" + "${LIBRARY_DIR}/odbcinst/SQLWritePrivateProfileString.c" ) add_library (unixodbc ${SRCS}) @@ -280,7 +280,7 @@ target_include_directories (unixodbc linux_x86_64/private PUBLIC linux_x86_64 - ${LIBRARY_DIR}/include + "${LIBRARY_DIR}/include" ) target_compile_definitions (unixodbc PRIVATE -DHAVE_CONFIG_H) target_compile_options (unixodbc diff --git a/contrib/yaml-cpp b/contrib/yaml-cpp new file mode 160000 index 00000000000..0c86adac6d1 --- /dev/null +++ b/contrib/yaml-cpp @@ -0,0 +1 @@ +Subproject commit 0c86adac6d117ee2b4afcedb8ade19036ca0327d diff --git a/contrib/yaml-cpp-cmake/CMakeLists.txt b/contrib/yaml-cpp-cmake/CMakeLists.txt new file mode 100644 index 00000000000..ed0287de110 --- /dev/null +++ b/contrib/yaml-cpp-cmake/CMakeLists.txt @@ -0,0 +1,39 @@ +set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/yaml-cpp) + +set (SRCS + ${LIBRARY_DIR}/src/binary.cpp + ${LIBRARY_DIR}/src/emitterutils.cpp + ${LIBRARY_DIR}/src/null.cpp + ${LIBRARY_DIR}/src/scantoken.cpp + ${LIBRARY_DIR}/src/convert.cpp + ${LIBRARY_DIR}/src/exceptions.cpp + ${LIBRARY_DIR}/src/ostream_wrapper.cpp + ${LIBRARY_DIR}/src/simplekey.cpp + ${LIBRARY_DIR}/src/depthguard.cpp + ${LIBRARY_DIR}/src/exp.cpp + ${LIBRARY_DIR}/src/parse.cpp + ${LIBRARY_DIR}/src/singledocparser.cpp + ${LIBRARY_DIR}/src/directives.cpp + ${LIBRARY_DIR}/src/memory.cpp + ${LIBRARY_DIR}/src/parser.cpp + ${LIBRARY_DIR}/src/stream.cpp + ${LIBRARY_DIR}/src/emit.cpp + ${LIBRARY_DIR}/src/nodebuilder.cpp + ${LIBRARY_DIR}/src/regex_yaml.cpp + ${LIBRARY_DIR}/src/tag.cpp + ${LIBRARY_DIR}/src/emitfromevents.cpp + ${LIBRARY_DIR}/src/node.cpp + ${LIBRARY_DIR}/src/scanner.cpp + ${LIBRARY_DIR}/src/emitter.cpp + ${LIBRARY_DIR}/src/node_data.cpp + ${LIBRARY_DIR}/src/scanscalar.cpp + ${LIBRARY_DIR}/src/emitterstate.cpp + ${LIBRARY_DIR}/src/nodeevents.cpp + ${LIBRARY_DIR}/src/scantag.cpp +) + +add_library (yaml-cpp ${SRCS}) + + +target_include_directories(yaml-cpp PRIVATE ${LIBRARY_DIR}/include/yaml-cpp) +target_include_directories(yaml-cpp SYSTEM BEFORE PUBLIC ${LIBRARY_DIR}/include) diff --git a/contrib/zlib-ng b/contrib/zlib-ng index 6fd1846c8b8..db232d30b4c 160000 --- a/contrib/zlib-ng +++ b/contrib/zlib-ng @@ -1 +1 @@ -Subproject commit 6fd1846c8b8f59436fe2dd752d0f316ddbb64df6 +Subproject commit db232d30b4c72fd58e6d7eae2d12cebf9c3d90db diff --git a/contrib/zstd b/contrib/zstd index 10f0e6993f9..a488ba114ec 160000 --- a/contrib/zstd +++ b/contrib/zstd @@ -1 +1 @@ -Subproject commit 10f0e6993f9d2f682da6d04aa2385b7d53cbb4ee +Subproject commit a488ba114ec17ea1054b9057c26a046fc122b3b6 diff --git a/contrib/zstd-cmake/CMakeLists.txt b/contrib/zstd-cmake/CMakeLists.txt index 58a827761ea..226ee1a8067 100644 --- 
a/contrib/zstd-cmake/CMakeLists.txt +++ b/contrib/zstd-cmake/CMakeLists.txt @@ -39,108 +39,113 @@ function(GetLibraryVersion _content _outputVar1 _outputVar2 _outputVar3) endfunction() # Define library directory, where sources and header files are located -SET(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/zstd/lib) -INCLUDE_DIRECTORIES(BEFORE ${LIBRARY_DIR} ${LIBRARY_DIR}/common) +SET(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/zstd/lib") +INCLUDE_DIRECTORIES(BEFORE ${LIBRARY_DIR} "${LIBRARY_DIR}/common") # Read file content -FILE(READ ${LIBRARY_DIR}/zstd.h HEADER_CONTENT) +FILE(READ "${LIBRARY_DIR}/zstd.h" HEADER_CONTENT) # Parse version GetLibraryVersion("${HEADER_CONTENT}" LIBVER_MAJOR LIBVER_MINOR LIBVER_RELEASE) MESSAGE(STATUS "ZSTD VERSION ${LIBVER_MAJOR}.${LIBVER_MINOR}.${LIBVER_RELEASE}") # cd contrib/zstd/lib -# find . -name '*.c' | grep -vP 'deprecated|legacy' | sort | sed 's/^\./ ${LIBRARY_DIR}/' +# find . -name '*.c' | grep -vP 'deprecated|legacy' | sort | sed 's/^\./ "${LIBRARY_DIR}/"' SET(Sources - ${LIBRARY_DIR}/common/debug.c - ${LIBRARY_DIR}/common/entropy_common.c - ${LIBRARY_DIR}/common/error_private.c - ${LIBRARY_DIR}/common/fse_decompress.c - ${LIBRARY_DIR}/common/pool.c - ${LIBRARY_DIR}/common/threading.c - ${LIBRARY_DIR}/common/xxhash.c - ${LIBRARY_DIR}/common/zstd_common.c - ${LIBRARY_DIR}/compress/fse_compress.c - ${LIBRARY_DIR}/compress/hist.c - ${LIBRARY_DIR}/compress/huf_compress.c - ${LIBRARY_DIR}/compress/zstd_compress.c - ${LIBRARY_DIR}/compress/zstd_compress_literals.c - ${LIBRARY_DIR}/compress/zstd_compress_sequences.c - ${LIBRARY_DIR}/compress/zstd_double_fast.c - ${LIBRARY_DIR}/compress/zstd_fast.c - ${LIBRARY_DIR}/compress/zstd_lazy.c - ${LIBRARY_DIR}/compress/zstd_ldm.c - ${LIBRARY_DIR}/compress/zstdmt_compress.c - ${LIBRARY_DIR}/compress/zstd_opt.c - ${LIBRARY_DIR}/decompress/huf_decompress.c - ${LIBRARY_DIR}/decompress/zstd_ddict.c - ${LIBRARY_DIR}/decompress/zstd_decompress_block.c - ${LIBRARY_DIR}/decompress/zstd_decompress.c - ${LIBRARY_DIR}/dictBuilder/cover.c - ${LIBRARY_DIR}/dictBuilder/divsufsort.c - ${LIBRARY_DIR}/dictBuilder/fastcover.c - ${LIBRARY_DIR}/dictBuilder/zdict.c) + "${LIBRARY_DIR}/common/debug.c" + "${LIBRARY_DIR}/common/entropy_common.c" + "${LIBRARY_DIR}/common/error_private.c" + "${LIBRARY_DIR}/common/fse_decompress.c" + "${LIBRARY_DIR}/common/pool.c" + "${LIBRARY_DIR}/common/threading.c" + "${LIBRARY_DIR}/common/xxhash.c" + "${LIBRARY_DIR}/common/zstd_common.c" + "${LIBRARY_DIR}/compress/fse_compress.c" + "${LIBRARY_DIR}/compress/hist.c" + "${LIBRARY_DIR}/compress/huf_compress.c" + "${LIBRARY_DIR}/compress/zstd_compress.c" + "${LIBRARY_DIR}/compress/zstd_compress_literals.c" + "${LIBRARY_DIR}/compress/zstd_compress_sequences.c" + "${LIBRARY_DIR}/compress/zstd_compress_superblock.c" + "${LIBRARY_DIR}/compress/zstd_double_fast.c" + "${LIBRARY_DIR}/compress/zstd_fast.c" + "${LIBRARY_DIR}/compress/zstd_lazy.c" + "${LIBRARY_DIR}/compress/zstd_ldm.c" + "${LIBRARY_DIR}/compress/zstdmt_compress.c" + "${LIBRARY_DIR}/compress/zstd_opt.c" + "${LIBRARY_DIR}/decompress/huf_decompress.c" + "${LIBRARY_DIR}/decompress/zstd_ddict.c" + "${LIBRARY_DIR}/decompress/zstd_decompress_block.c" + "${LIBRARY_DIR}/decompress/zstd_decompress.c" + "${LIBRARY_DIR}/dictBuilder/cover.c" + "${LIBRARY_DIR}/dictBuilder/divsufsort.c" + "${LIBRARY_DIR}/dictBuilder/fastcover.c" + "${LIBRARY_DIR}/dictBuilder/zdict.c") # cd contrib/zstd/lib -# find . -name '*.h' | grep -vP 'deprecated|legacy' | sort | sed 's/^\./ ${LIBRARY_DIR}/' +# find . 
-name '*.h' | grep -vP 'deprecated|legacy' | sort | sed 's/^\./ "${LIBRARY_DIR}/"' SET(Headers - ${LIBRARY_DIR}/common/bitstream.h - ${LIBRARY_DIR}/common/compiler.h - ${LIBRARY_DIR}/common/cpu.h - ${LIBRARY_DIR}/common/debug.h - ${LIBRARY_DIR}/common/error_private.h - ${LIBRARY_DIR}/common/fse.h - ${LIBRARY_DIR}/common/huf.h - ${LIBRARY_DIR}/common/mem.h - ${LIBRARY_DIR}/common/pool.h - ${LIBRARY_DIR}/common/threading.h - ${LIBRARY_DIR}/common/xxhash.h - ${LIBRARY_DIR}/common/zstd_errors.h - ${LIBRARY_DIR}/common/zstd_internal.h - ${LIBRARY_DIR}/compress/hist.h - ${LIBRARY_DIR}/compress/zstd_compress_internal.h - ${LIBRARY_DIR}/compress/zstd_compress_literals.h - ${LIBRARY_DIR}/compress/zstd_compress_sequences.h - ${LIBRARY_DIR}/compress/zstd_cwksp.h - ${LIBRARY_DIR}/compress/zstd_double_fast.h - ${LIBRARY_DIR}/compress/zstd_fast.h - ${LIBRARY_DIR}/compress/zstd_lazy.h - ${LIBRARY_DIR}/compress/zstd_ldm.h - ${LIBRARY_DIR}/compress/zstdmt_compress.h - ${LIBRARY_DIR}/compress/zstd_opt.h - ${LIBRARY_DIR}/decompress/zstd_ddict.h - ${LIBRARY_DIR}/decompress/zstd_decompress_block.h - ${LIBRARY_DIR}/decompress/zstd_decompress_internal.h - ${LIBRARY_DIR}/dictBuilder/cover.h - ${LIBRARY_DIR}/dictBuilder/divsufsort.h - ${LIBRARY_DIR}/dictBuilder/zdict.h - ${LIBRARY_DIR}/zstd.h) + "${LIBRARY_DIR}/common/bitstream.h" + "${LIBRARY_DIR}/common/compiler.h" + "${LIBRARY_DIR}/common/cpu.h" + "${LIBRARY_DIR}/common/debug.h" + "${LIBRARY_DIR}/common/error_private.h" + "${LIBRARY_DIR}/common/fse.h" + "${LIBRARY_DIR}/common/huf.h" + "${LIBRARY_DIR}/common/mem.h" + "${LIBRARY_DIR}/common/pool.h" + "${LIBRARY_DIR}/common/threading.h" + "${LIBRARY_DIR}/common/xxhash.h" + "${LIBRARY_DIR}/common/zstd_deps.h" + "${LIBRARY_DIR}/common/zstd_internal.h" + "${LIBRARY_DIR}/common/zstd_trace.h" + "${LIBRARY_DIR}/compress/hist.h" + "${LIBRARY_DIR}/compress/zstd_compress_internal.h" + "${LIBRARY_DIR}/compress/zstd_compress_literals.h" + "${LIBRARY_DIR}/compress/zstd_compress_sequences.h" + "${LIBRARY_DIR}/compress/zstd_compress_superblock.h" + "${LIBRARY_DIR}/compress/zstd_cwksp.h" + "${LIBRARY_DIR}/compress/zstd_double_fast.h" + "${LIBRARY_DIR}/compress/zstd_fast.h" + "${LIBRARY_DIR}/compress/zstd_lazy.h" + "${LIBRARY_DIR}/compress/zstd_ldm_geartab.h" + "${LIBRARY_DIR}/compress/zstd_ldm.h" + "${LIBRARY_DIR}/compress/zstdmt_compress.h" + "${LIBRARY_DIR}/compress/zstd_opt.h" + "${LIBRARY_DIR}/decompress/zstd_ddict.h" + "${LIBRARY_DIR}/decompress/zstd_decompress_block.h" + "${LIBRARY_DIR}/decompress/zstd_decompress_internal.h" + "${LIBRARY_DIR}/dictBuilder/cover.h" + "${LIBRARY_DIR}/dictBuilder/divsufsort.h" + "${LIBRARY_DIR}/zdict.h" + "${LIBRARY_DIR}/zstd_errors.h" + "${LIBRARY_DIR}/zstd.h") SET(ZSTD_LEGACY_SUPPORT true) IF (ZSTD_LEGACY_SUPPORT) - SET(LIBRARY_LEGACY_DIR ${LIBRARY_DIR}/legacy) + SET(LIBRARY_LEGACY_DIR "${LIBRARY_DIR}/legacy") INCLUDE_DIRECTORIES(BEFORE ${LIBRARY_LEGACY_DIR}) ADD_DEFINITIONS(-D ZSTD_LEGACY_SUPPORT=1) SET(Sources ${Sources} - ${LIBRARY_LEGACY_DIR}/zstd_v01.c - ${LIBRARY_LEGACY_DIR}/zstd_v02.c - ${LIBRARY_LEGACY_DIR}/zstd_v03.c - ${LIBRARY_LEGACY_DIR}/zstd_v04.c - ${LIBRARY_LEGACY_DIR}/zstd_v05.c - ${LIBRARY_LEGACY_DIR}/zstd_v06.c - ${LIBRARY_LEGACY_DIR}/zstd_v07.c) + "${LIBRARY_LEGACY_DIR}/zstd_v01.c" + "${LIBRARY_LEGACY_DIR}/zstd_v02.c" + "${LIBRARY_LEGACY_DIR}/zstd_v03.c" + "${LIBRARY_LEGACY_DIR}/zstd_v04.c" + "${LIBRARY_LEGACY_DIR}/zstd_v05.c" + "${LIBRARY_LEGACY_DIR}/zstd_v06.c" + "${LIBRARY_LEGACY_DIR}/zstd_v07.c") SET(Headers ${Headers} - ${LIBRARY_LEGACY_DIR}/zstd_legacy.h - 
${LIBRARY_LEGACY_DIR}/zstd_v01.h - ${LIBRARY_LEGACY_DIR}/zstd_v02.h - ${LIBRARY_LEGACY_DIR}/zstd_v03.h - ${LIBRARY_LEGACY_DIR}/zstd_v04.h - ${LIBRARY_LEGACY_DIR}/zstd_v05.h - ${LIBRARY_LEGACY_DIR}/zstd_v06.h - ${LIBRARY_LEGACY_DIR}/zstd_v07.h) + "${LIBRARY_LEGACY_DIR}/zstd_legacy.h" + "${LIBRARY_LEGACY_DIR}/zstd_v01.h" + "${LIBRARY_LEGACY_DIR}/zstd_v02.h" + "${LIBRARY_LEGACY_DIR}/zstd_v03.h" + "${LIBRARY_LEGACY_DIR}/zstd_v04.h" + "${LIBRARY_LEGACY_DIR}/zstd_v05.h" + "${LIBRARY_LEGACY_DIR}/zstd_v06.h" + "${LIBRARY_LEGACY_DIR}/zstd_v07.h") ENDIF (ZSTD_LEGACY_SUPPORT) ADD_LIBRARY(zstd ${Sources} ${Headers}) diff --git a/debian/changelog b/debian/changelog index 23d63b41099..e1c46dae3a8 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,5 +1,5 @@ -clickhouse (21.4.1.1) unstable; urgency=low +clickhouse (21.7.1.1) unstable; urgency=low * Modified source code - -- clickhouse-release Sat, 06 Mar 2021 14:43:27 +0300 + -- clickhouse-release Thu, 20 May 2021 22:23:29 +0300 diff --git a/debian/clickhouse-client.postinst b/debian/clickhouse-client.postinst deleted file mode 100644 index 480bf2f5c67..00000000000 --- a/debian/clickhouse-client.postinst +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/sh -set -e - -CLICKHOUSE_USER=${CLICKHOUSE_USER=clickhouse} - -mkdir -p /etc/clickhouse-client/conf.d - -#DEBHELPER# diff --git a/debian/clickhouse-common-static.install b/debian/clickhouse-common-static.install index 17c955a12a9..087a6dbba8f 100644 --- a/debian/clickhouse-common-static.install +++ b/debian/clickhouse-common-static.install @@ -1,5 +1,5 @@ usr/bin/clickhouse usr/bin/clickhouse-odbc-bridge +usr/bin/clickhouse-library-bridge usr/bin/clickhouse-extract-from-config usr/share/bash-completion/completions -etc/security/limits.d/clickhouse.conf diff --git a/debian/clickhouse-server.config b/debian/clickhouse-server.config deleted file mode 100644 index 636ff7f4da7..00000000000 --- a/debian/clickhouse-server.config +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/sh -e - -test -f /usr/share/debconf/confmodule && . /usr/share/debconf/confmodule - -db_fget clickhouse-server/default-password seen || true -password_seen="$RET" - -if [ "$1" = "reconfigure" ]; then - password_seen=false -fi - -if [ "$password_seen" != "true" ]; then - db_input high clickhouse-server/default-password || true - db_go || true -fi -db_go || true diff --git a/debian/clickhouse-server.postinst b/debian/clickhouse-server.postinst index dc876f45954..419c13e3daf 100644 --- a/debian/clickhouse-server.postinst +++ b/debian/clickhouse-server.postinst @@ -23,11 +23,13 @@ if [ ! -f "/etc/debian_version" ]; then fi if [ "$1" = configure ] || [ -n "$not_deb_os" ]; then + + ${CLICKHOUSE_GENERIC_PROGRAM} install --user "${CLICKHOUSE_USER}" --group "${CLICKHOUSE_GROUP}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}" --log-path "${CLICKHOUSE_LOGDIR}" --data-path "${CLICKHOUSE_DATADIR}" + if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then # if old rc.d service present - remove it if [ -x "/etc/init.d/clickhouse-server" ] && [ -x "/usr/sbin/update-rc.d" ]; then /usr/sbin/update-rc.d clickhouse-server remove - echo "ClickHouse init script has migrated to systemd. 
Please manually stop old server and restart the service: sudo killall clickhouse-server && sleep 5 && sudo service clickhouse-server restart" fi /bin/systemctl daemon-reload @@ -38,10 +40,8 @@ if [ "$1" = configure ] || [ -n "$not_deb_os" ]; then if [ -x "/usr/sbin/update-rc.d" ]; then /usr/sbin/update-rc.d clickhouse-server defaults 19 19 >/dev/null || exit $? else - echo # TODO [ "$OS" = "rhel" ] || [ "$OS" = "centos" ] || [ "$OS" = "fedora" ] + echo # Other OS fi fi fi - - ${CLICKHOUSE_GENERIC_PROGRAM} install --user "${CLICKHOUSE_USER}" --group "${CLICKHOUSE_GROUP}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}" --log-path "${CLICKHOUSE_LOGDIR}" --data-path "${CLICKHOUSE_DATADIR}" fi diff --git a/debian/clickhouse-server.preinst b/debian/clickhouse-server.preinst deleted file mode 100644 index 3529aefa7da..00000000000 --- a/debian/clickhouse-server.preinst +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/sh - -if [ "$1" = "upgrade" ]; then - # Return etc/cron.d/clickhouse-server to original state - service clickhouse-server disable_cron ||: -fi - -#DEBHELPER# diff --git a/debian/clickhouse-server.prerm b/debian/clickhouse-server.prerm deleted file mode 100644 index 02e855a7125..00000000000 --- a/debian/clickhouse-server.prerm +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh - -if [ "$1" = "upgrade" ] || [ "$1" = "remove" ]; then - # Return etc/cron.d/clickhouse-server to original state - service clickhouse-server disable_cron ||: -fi diff --git a/debian/clickhouse-server.templates b/debian/clickhouse-server.templates deleted file mode 100644 index dd55824e15c..00000000000 --- a/debian/clickhouse-server.templates +++ /dev/null @@ -1,3 +0,0 @@ -Template: clickhouse-server/default-password -Type: password -Description: Enter password for default user: diff --git a/debian/clickhouse.limits b/debian/clickhouse.limits deleted file mode 100644 index aca44082c4e..00000000000 --- a/debian/clickhouse.limits +++ /dev/null @@ -1,2 +0,0 @@ -clickhouse soft nofile 262144 -clickhouse hard nofile 262144 diff --git a/debian/pbuilder-hooks/A00ccache b/debian/pbuilder-hooks/A00ccache deleted file mode 100755 index 575358f31eb..00000000000 --- a/debian/pbuilder-hooks/A00ccache +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -# set -x - -# CCACHEDIR - for pbuilder ; CCACHE_DIR - for ccache - -echo "CCACHEDIR=$CCACHEDIR CCACHE_DIR=$CCACHE_DIR SET_CCACHEDIR=$SET_CCACHEDIR" - -[ -z "$CCACHE_DIR" ] && export CCACHE_DIR=${CCACHEDIR:=${SET_CCACHEDIR=/var/cache/pbuilder/ccache}} - -if [ -n "$CCACHE_DIR" ]; then - mkdir -p $CCACHE_DIR $DISTCC_DIR ||: - chown -R $BUILDUSERID:$BUILDUSERID $CCACHE_DIR $DISTCC_DIR ||: - chmod -R a+rwx $CCACHE_DIR $DISTCC_DIR ||: -fi - -[ $CCACHE_PREFIX = 'distcc' ] && mkdir -p $DISTCC_DIR && echo "localhost/`nproc`" >> $DISTCC_DIR/hosts && distcc --show-hosts - -df -h -ccache --show-stats -ccache --zero-stats -ccache --max-size=${CCACHE_SIZE:=32G} diff --git a/debian/pbuilder-hooks/A01xlocale b/debian/pbuilder-hooks/A01xlocale deleted file mode 100755 index 0e90f4ee71c..00000000000 --- a/debian/pbuilder-hooks/A01xlocale +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -# https://github.com/llvm-mirror/libcxx/commit/6e02e89f65ca1ca1d6ce30fbc557563164dd327e - -touch /usr/include/xlocale.h diff --git a/debian/pbuilder-hooks/B00ccache-stat b/debian/pbuilder-hooks/B00ccache-stat deleted file mode 100755 index fdf6db1b7e7..00000000000 --- a/debian/pbuilder-hooks/B00ccache-stat +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -ccache --show-stats diff --git 
a/debian/pbuilder-hooks/B90test-server b/debian/pbuilder-hooks/B90test-server deleted file mode 100755 index e36c255f9fc..00000000000 --- a/debian/pbuilder-hooks/B90test-server +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env bash -set -e -set -x - -TEST_CONNECT=${TEST_CONNECT=1} -TEST_SSL=${TEST_SSL=1} -PACKAGE_INSTALL=${PACKAGE_INSTALL=1} -TEST_PORT_RANDOM=${TEST_PORT_RANDOM=1} - -if [ "${PACKAGE_INSTALL}" ]; then - dpkg --auto-deconfigure -i /tmp/buildd/*.deb ||: - apt install -y -f --allow-downgrades ||: - dpkg -l | grep clickhouse ||: - - # Second install to replace debian versions - dpkg --auto-deconfigure -i /tmp/buildd/*.deb ||: - dpkg -l | grep clickhouse ||: - - # Some test references uses specific timezone - ln -fs /usr/share/zoneinfo/Europe/Moscow /etc/localtime - echo 'Europe/Moscow' > /etc/timezone - dpkg-reconfigure -f noninteractive tzdata -fi - -mkdir -p /etc/clickhouse-server/config.d /etc/clickhouse-client/config.d - -if [ "${TEST_PORT_RANDOM}" ]; then - CLICKHOUSE_PORT_BASE=${CLICKHOUSE_PORT_BASE:=$(( ( RANDOM % 50000 ) + 10000 ))} - CLICKHOUSE_PORT_TCP=${CLICKHOUSE_PORT_TCP:=$(($CLICKHOUSE_PORT_BASE + 1))} - CLICKHOUSE_PORT_HTTP=${CLICKHOUSE_PORT_HTTP:=$(($CLICKHOUSE_PORT_BASE + 2))} - CLICKHOUSE_PORT_INTERSERVER=${CLICKHOUSE_PORT_INTERSERVER:=$(($CLICKHOUSE_PORT_BASE + 3))} - CLICKHOUSE_PORT_TCP_SECURE=${CLICKHOUSE_PORT_TCP_SECURE:=$(($CLICKHOUSE_PORT_BASE + 4))} - CLICKHOUSE_PORT_HTTPS=${CLICKHOUSE_PORT_HTTPS:=$(($CLICKHOUSE_PORT_BASE + 5))} -fi - -export CLICKHOUSE_PORT_TCP=${CLICKHOUSE_PORT_TCP:=9000} -export CLICKHOUSE_PORT_HTTP=${CLICKHOUSE_PORT_HTTP:=8123} -export CLICKHOUSE_PORT_INTERSERVER=${CLICKHOUSE_PORT_INTERSERVER:=9009} -export CLICKHOUSE_PORT_TCP_SECURE=${CLICKHOUSE_PORT_TCP_SECURE:=9440} -export CLICKHOUSE_PORT_HTTPS=${CLICKHOUSE_PORT_HTTPS:=8443} - -if [ "${TEST_CONNECT}" ]; then - [ "${TEST_PORT_RANDOM}" ] && echo "${CLICKHOUSE_PORT_HTTP}${CLICKHOUSE_PORT_TCP}${CLICKHOUSE_PORT_INTERSERVER}" > /etc/clickhouse-server/config.d/port.xml - - if [ "${TEST_SSL}" ]; then - CLICKHOUSE_SSL_CONFIG="noneAcceptCertificateHandler" - echo "${CLICKHOUSE_PORT_HTTPS}${CLICKHOUSE_PORT_TCP_SECURE}${CLICKHOUSE_SSL_CONFIG}" > /etc/clickhouse-server/config.d/ssl.xml - echo "${CLICKHOUSE_PORT_TCP}${CLICKHOUSE_PORT_TCP_SECURE}${CLICKHOUSE_SSL_CONFIG}" > /etc/clickhouse-client/config.xml - openssl dhparam -out /etc/clickhouse-server/dhparam.pem 256 - openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt - chmod -f a+r /etc/clickhouse-server/* /etc/clickhouse-client/* ||: - CLIENT_ADD+="--secure --port ${CLICKHOUSE_PORT_TCP_SECURE}" - else - CLIENT_ADD+="--port ${CLICKHOUSE_PORT_TCP}" - fi - - # For debug - # tail -n +1 -- /etc/clickhouse-server/*.xml /etc/clickhouse-server/config.d/*.xml ||: - - function finish { - service clickhouse-server stop - tail -n 100 /var/log/clickhouse-server/*.log ||: - sleep 1 - killall -9 clickhouse-server ||: - } - trap finish EXIT SIGINT SIGQUIT SIGTERM - - service clickhouse-server start - sleep ${TEST_SERVER_STARTUP_WAIT:=5} - service clickhouse-server status - - # TODO: remove me or make only on error: - tail -n100 /var/log/clickhouse-server/*.log ||: - - clickhouse-client --port $CLICKHOUSE_PORT_TCP -q "SELECT * from system.build_options;" - clickhouse-client ${CLIENT_ADD} -q "SELECT toDateTime(1);" - - ( [ "${TEST_RUN}" ] && clickhouse-test --queries /usr/share/clickhouse-test/queries --tmp /tmp/clickhouse-test/ ${TEST_OPT} ) || 
${TEST_TRUE:=true} - - service clickhouse-server stop - -fi - -# Test debug symbols -# gdb -ex quit --args /usr/bin/clickhouse-server diff --git a/debian/pbuilder-hooks/C99kill-make b/debian/pbuilder-hooks/C99kill-make deleted file mode 100755 index 2068e75dc40..00000000000 --- a/debian/pbuilder-hooks/C99kill-make +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -# Try stop parallel build after timeout - -killall make gcc gcc-8 g++-8 gcc-9 g++-9 clang clang-6.0 clang++-6.0 clang-7 clang++-7 ||: diff --git a/debian/rules b/debian/rules index 8eb47e95389..73d1f3d3b34 100755 --- a/debian/rules +++ b/debian/rules @@ -113,9 +113,6 @@ override_dh_install: ln -sf clickhouse-server.docs debian/clickhouse-client.docs ln -sf clickhouse-server.docs debian/clickhouse-common-static.docs - mkdir -p $(DESTDIR)/etc/security/limits.d - cp debian/clickhouse.limits $(DESTDIR)/etc/security/limits.d/clickhouse.conf - # systemd compatibility mkdir -p $(DESTDIR)/etc/systemd/system/ cp debian/clickhouse-server.service $(DESTDIR)/etc/systemd/system/ diff --git a/debian/watch b/debian/watch index 7ad4cedf713..ed3cab97ade 100644 --- a/debian/watch +++ b/debian/watch @@ -1,6 +1,6 @@ version=4 opts="filenamemangle=s%(?:.*?)?v?(\d[\d.]*)-stable\.tar\.gz%clickhouse-$1.tar.gz%" \ - https://github.com/yandex/clickhouse/tags \ + https://github.com/ClickHouse/ClickHouse/tags \ (?:.*?/)?v?(\d[\d.]*)-stable\.tar\.gz debian uupdate diff --git a/docker/client/Dockerfile b/docker/client/Dockerfile index d9cd68254b7..79ac92f2277 100644 --- a/docker/client/Dockerfile +++ b/docker/client/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/" -ARG version=21.4.1.* +ARG version=21.7.1.* RUN apt-get update \ && apt-get install --yes --no-install-recommends \ diff --git a/docker/images.json b/docker/images.json index 303bd159ce4..e2e22468596 100644 --- a/docker/images.json +++ b/docker/images.json @@ -138,7 +138,8 @@ "docker/test/stateless_unbundled", "docker/test/stateless_pytest", "docker/test/integration/base", - "docker/test/fuzzer" + "docker/test/fuzzer", + "docker/test/keeper-jepsen" ] }, "docker/packager/unbundled": { @@ -159,5 +160,9 @@ "docker/test/sqlancer": { "name": "yandex/clickhouse-sqlancer-test", "dependent": [] + }, + "docker/test/keeper-jepsen": { + "name": "yandex/clickhouse-keeper-jepsen-test", + "dependent": [] } } diff --git a/docker/packager/README.md b/docker/packager/README.md index 9fbc2d7f8b5..a745f6225fa 100644 --- a/docker/packager/README.md +++ b/docker/packager/README.md @@ -3,10 +3,10 @@ compilers and build settings. 
Correctly configured Docker daemon is single dependency. Usage: -Build deb package with `gcc-9` in `debug` mode: +Build deb package with `clang-11` in `debug` mode: ``` $ mkdir deb/test_output -$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=gcc-9 --build-type=debug +$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=clang-11 --build-type=debug $ ls -l deb/test_output -rw-r--r-- 1 root root 3730 clickhouse-client_18.14.2+debug_all.deb -rw-r--r-- 1 root root 84221888 clickhouse-common-static_18.14.2+debug_amd64.deb @@ -18,11 +18,11 @@ $ ls -l deb/test_output ``` -Build ClickHouse binary with `clang-10` and `address` sanitizer in `relwithdebuginfo` +Build ClickHouse binary with `clang-11` and `address` sanitizer in `relwithdebuginfo` mode: ``` $ mkdir $HOME/some_clickhouse -$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-10 --sanitizer=address +$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-11 --sanitizer=address $ ls -l $HOME/some_clickhouse -rwxr-xr-x 1 root root 787061952 clickhouse lrwxrwxrwx 1 root root 10 clickhouse-benchmark -> clickhouse diff --git a/docker/packager/binary/Dockerfile b/docker/packager/binary/Dockerfile index 91036d88d8c..56b2af5cf84 100644 --- a/docker/packager/binary/Dockerfile +++ b/docker/packager/binary/Dockerfile @@ -14,12 +14,8 @@ RUN apt-get update \ lsb-release \ wget \ --yes --no-install-recommends --verbose-versions \ - && cat /etc/resolv.conf \ - && echo "nameserver 1.1.1.1" >> /etc/resolv.conf \ - && nslookup -debug apt.llvm.org \ - && ping -c1 apt.llvm.org \ - && wget -nv --retry-connrefused --tries=10 -O /tmp/llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key \ && export LLVM_PUBKEY_HASH="bda960a8da687a275a2078d43c111d66b1c6a893a3275271beedf266c1ff4a0cdecb429c7a5cccf9f486ea7aa43fd27f" \ + && wget -nv -O /tmp/llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key \ && echo "${LLVM_PUBKEY_HASH} /tmp/llvm-snapshot.gpg.key" | sha384sum -c \ && apt-key add /tmp/llvm-snapshot.gpg.key \ && export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \ @@ -36,41 +32,30 @@ RUN apt-get update \ software-properties-common \ --yes --no-install-recommends -RUN cat /etc/resolv.conf \ - && echo "nameserver 1.1.1.1" >> /etc/resolv.conf \ - && nslookup -debug apt.llvm.org \ - && apt-get update \ +RUN apt-get update \ && apt-get install \ bash \ - cmake \ + build-essential \ ccache \ - curl \ - gcc-9 \ - g++-9 \ - clang-10 \ - clang-tidy-10 \ - lld-10 \ - llvm-10 \ - llvm-10-dev \ clang-11 \ clang-tidy-11 \ + cmake \ + curl \ + g++-10 \ + gcc-10 \ + gdb \ + git \ + gperf \ + libicu-dev \ + libreadline-dev \ lld-11 \ llvm-11 \ llvm-11-dev \ - libicu-dev \ - libreadline-dev \ + moreutils \ ninja-build \ - gperf \ - git \ - opencl-headers \ - ocl-icd-libopencl1 \ - intel-opencl-icd \ - tzdata \ - gperf \ - cmake \ - gdb \ + pigz \ rename \ - build-essential \ + tzdata \ --yes --no-install-recommends # This symlink required by gcc to find lld compiler @@ -118,4 +103,4 @@ RUN rm /etc/apt/sources.list.d/proposed-repositories.list && apt-get update COPY build.sh / -CMD ["/bin/bash", "/build.sh"] +CMD ["bash", "-c", "/build.sh 2>&1 | ts"] diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh index a42789c6186..cf74105fbbb 100755 --- a/docker/packager/binary/build.sh +++ b/docker/packager/binary/build.sh @@ -11,17 +11,28 @@ tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build/cmake/toolchain/linux-aarch64 --strip-components=1
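A note on the packager README hunk above: the default compiler moves from `gcc-9` to `clang-11`, and the accepted flag values can be read off the `argparse` definitions further down in this diff. As a sketch only (the output path is illustrative, not part of this change), other supported combinations follow the same shape:

```
$ mkdir -p $HOME/split_output
$ ./packager --output-dir=$HOME/split_output --package-type binary --compiler=clang-11 --split-binary
```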
mkdir -p build/cmake/toolchain/freebsd-x86_64 tar xJf freebsd-11.3-toolchain.tar.xz -C build/cmake/toolchain/freebsd-x86_64 --strip-components=1 +# Uncomment to debug ccache. Don't put ccache log in /output right away, or it +# will be confusingly packed into the "performance" package. +# export CCACHE_LOGFILE=/build/ccache.log +# export CCACHE_DEBUG=1 + mkdir -p build/build_docker cd build/build_docker -ccache --show-stats ||: -ccache --zero-stats ||: -ln -s /usr/lib/x86_64-linux-gnu/libOpenCL.so.1.0.0 /usr/lib/libOpenCL.so ||: rm -f CMakeCache.txt # Read cmake arguments into array (possibly empty) read -ra CMAKE_FLAGS <<< "${CMAKE_FLAGS:-}" cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA "-DCMAKE_BUILD_TYPE=$BUILD_TYPE" "-DSANITIZE=$SANITIZER" -DENABLE_CHECK_HEAVY_BUILDS=1 "${CMAKE_FLAGS[@]}" .. + +ccache --show-config ||: +ccache --show-stats ||: +ccache --zero-stats ||: + # shellcheck disable=SC2086 # No quotes because I want it to expand to nothing if empty. ninja $NINJA_FLAGS clickhouse-bundle + +ccache --show-config ||: +ccache --show-stats ||: + mv ./programs/clickhouse* /output mv ./src/unit_tests_dbms /output ||: # may not exist for some binary builds find . -name '*.so' -print -exec mv '{}' /output \; @@ -65,8 +76,21 @@ then cp ../programs/server/config.xml /output/config cp ../programs/server/users.xml /output/config cp -r --dereference ../programs/server/config.d /output/config - tar -czvf "$COMBINED_OUTPUT.tgz" /output + tar -cv -I pigz -f "$COMBINED_OUTPUT.tgz" /output rm -r /output/* mv "$COMBINED_OUTPUT.tgz" /output fi -ccache --show-stats ||: + +if [ "${CCACHE_DEBUG:-}" == "1" ] +then + find . -name '*.ccache-*' -print0 \ + | tar -c -I pixz -f /output/ccache-debug.txz --null -T - +fi + +if [ -n "$CCACHE_LOGFILE" ] +then + # Compress the log as well, or else the CI will try to compress all log + # files in place, and will fail because this directory is not writable. + tar -cv -I pixz -f /output/ccache.log.txz "$CCACHE_LOGFILE" +fi + diff --git a/docker/packager/deb/Dockerfile b/docker/packager/deb/Dockerfile index 8fd89d60f85..2f1d28efe61 100644 --- a/docker/packager/deb/Dockerfile +++ b/docker/packager/deb/Dockerfile @@ -34,31 +34,25 @@ RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/1/dpkg-deb \ # Libraries from OS are only needed to test the "unbundled" build (this is not used in production). RUN apt-get update \ && apt-get install \ - gcc-9 \ - g++-9 \ + alien \ clang-11 \ clang-tidy-11 \ + cmake \ + debhelper \ + devscripts \ + gdb \ + git \ + gperf \ lld-11 \ llvm-11 \ llvm-11-dev \ - clang-10 \ - clang-tidy-10 \ - lld-10 \ - llvm-10 \ - llvm-10-dev \ + moreutils \ ninja-build \ perl \ - pkg-config \ - devscripts \ - debhelper \ - git \ - tzdata \ - gperf \ - alien \ - cmake \ - gdb \ - moreutils \ pigz \ + pixz \ + pkg-config \ + tzdata \ --yes --no-install-recommends # NOTE: For some reason we have outdated version of gcc-10 in ubuntu 20.04 stable. diff --git a/docker/packager/deb/build.sh b/docker/packager/deb/build.sh index 6450e21d289..4e14574b738 100755 --- a/docker/packager/deb/build.sh +++ b/docker/packager/deb/build.sh @@ -2,10 +2,16 @@ set -x -e +# Uncomment to debug ccache. 
+# export CCACHE_LOGFILE=/build/ccache.log +# export CCACHE_DEBUG=1 + +ccache --show-config ||: ccache --show-stats ||: ccache --zero-stats ||: + read -ra ALIEN_PKGS <<< "${ALIEN_PKGS:-}" -build/release --no-pbuilder "${ALIEN_PKGS[@]}" | ts '%Y-%m-%d %H:%M:%S' +build/release "${ALIEN_PKGS[@]}" | ts '%Y-%m-%d %H:%M:%S' mv /*.deb /output mv -- *.changes /output mv -- *.buildinfo /output @@ -22,5 +28,19 @@ then mv /build/obj-*/src/unit_tests_dbms /output/binary fi fi + +ccache --show-config ||: ccache --show-stats ||: -ln -s /usr/lib/x86_64-linux-gnu/libOpenCL.so.1.0.0 /usr/lib/libOpenCL.so ||: + +if [ "${CCACHE_DEBUG:-}" == "1" ] +then + find /build -name '*.ccache-*' -print0 \ + | tar -c -I pixz -f /output/ccache-debug.txz --null -T - +fi + +if [ -n "$CCACHE_LOGFILE" ] +then + # Compress the log as well, or else the CI will try to compress all log + # files in place, and will fail because this directory is not writable. + tar -cv -I pixz -f /output/ccache.log.txz "$CCACHE_LOGFILE" +fi diff --git a/docker/packager/packager b/docker/packager/packager index 65c03cc10e3..81474166cc9 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -143,8 +143,7 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ cmake_flags.append('-DUSE_GTEST=1') if unbundled: - # TODO: fix build with ENABLE_RDKAFKA - cmake_flags.append('-DUNBUNDLED=1 -DUSE_INTERNAL_RDKAFKA_LIBRARY=1 -DENABLE_ARROW=0 -DENABLE_ORC=0 -DENABLE_PARQUET=0') + cmake_flags.append('-DUNBUNDLED=1 -DUSE_INTERNAL_RDKAFKA_LIBRARY=1 -DENABLE_ARROW=0 -DENABLE_AVRO=0 -DENABLE_ORC=0 -DENABLE_PARQUET=0') if split_binary: cmake_flags.append('-DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1 -DCLICKHOUSE_SPLIT_BINARY=1') @@ -155,6 +154,10 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ if clang_tidy: cmake_flags.append('-DENABLE_CLANG_TIDY=1') + cmake_flags.append('-DENABLE_UTILS=1') + cmake_flags.append('-DUSE_GTEST=1') + cmake_flags.append('-DENABLE_TESTS=1') + cmake_flags.append('-DENABLE_EXAMPLES=1') # Don't stop on first error to find more clang-tidy errors in one run. 
result.append('NINJA_FLAGS=-k0') @@ -182,9 +185,8 @@ if __name__ == "__main__": parser.add_argument("--clickhouse-repo-path", default=os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir)) parser.add_argument("--output-dir", required=True) parser.add_argument("--build-type", choices=("debug", ""), default="") - parser.add_argument("--compiler", choices=("clang-10", "clang-10-darwin", "clang-10-aarch64", "clang-10-freebsd", - "clang-11", "clang-11-darwin", "clang-11-aarch64", "clang-11-freebsd", - "gcc-9", "gcc-10"), default="gcc-9") + parser.add_argument("--compiler", choices=("clang-11", "clang-11-darwin", "clang-11-aarch64", "clang-11-freebsd", + "gcc-10"), default="clang-11") parser.add_argument("--sanitizer", choices=("address", "thread", "memory", "undefined", ""), default="") parser.add_argument("--unbundled", action="store_true") parser.add_argument("--split-binary", action="store_true") diff --git a/docker/packager/unbundled/Dockerfile b/docker/packager/unbundled/Dockerfile index f640c595f14..4dd6dbc61d8 100644 --- a/docker/packager/unbundled/Dockerfile +++ b/docker/packager/unbundled/Dockerfile @@ -35,9 +35,6 @@ RUN apt-get update \ libjemalloc-dev \ libmsgpack-dev \ libcurl4-openssl-dev \ - opencl-headers \ - ocl-icd-libopencl1 \ - intel-opencl-icd \ unixodbc-dev \ odbcinst \ tzdata \ diff --git a/docker/packager/unbundled/build.sh b/docker/packager/unbundled/build.sh index 54575ab977c..c43c6b5071e 100755 --- a/docker/packager/unbundled/build.sh +++ b/docker/packager/unbundled/build.sh @@ -5,7 +5,7 @@ set -x -e ccache --show-stats ||: ccache --zero-stats ||: read -ra ALIEN_PKGS <<< "${ALIEN_PKGS:-}" -build/release --no-pbuilder "${ALIEN_PKGS[@]}" | ts '%Y-%m-%d %H:%M:%S' +build/release "${ALIEN_PKGS[@]}" | ts '%Y-%m-%d %H:%M:%S' mv /*.deb /output mv -- *.changes /output mv -- *.buildinfo /output @@ -13,4 +13,3 @@ mv /*.rpm /output ||: # if exists mv /*.tgz /output ||: # if exists ccache --show-stats ||: -ln -s /usr/lib/x86_64-linux-gnu/libOpenCL.so.1.0.0 /usr/lib/libOpenCL.so ||: diff --git a/docker/server/Dockerfile b/docker/server/Dockerfile index d22516eab0a..52dcb6caae5 100644 --- a/docker/server/Dockerfile +++ b/docker/server/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:20.04 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/" -ARG version=21.4.1.* +ARG version=21.7.1.* ARG gosu_ver=1.10 # set non-empty deb_location_url url to create a docker image @@ -64,6 +64,8 @@ RUN groupadd -r clickhouse --gid=101 \ clickhouse-client=$version \ clickhouse-server=$version ; \ fi \ + && wget --progress=bar:force:noscroll "https://github.com/tianon/gosu/releases/download/$gosu_ver/gosu-$(dpkg --print-architecture)" -O /bin/gosu \ + && chmod +x /bin/gosu \ && clickhouse-local -q 'SELECT * FROM system.build_options' \ && rm -rf \ /var/lib/apt/lists/* \ @@ -76,8 +78,6 @@ RUN groupadd -r clickhouse --gid=101 \ # we need to allow "others" access to clickhouse folder, because docker container # can be started with arbitrary uid (openshift usecase) -ADD https://github.com/tianon/gosu/releases/download/$gosu_ver/gosu-amd64 /bin/gosu - RUN locale-gen en_US.UTF-8 ENV LANG en_US.UTF-8 ENV LANGUAGE en_US:en @@ -88,10 +88,7 @@ RUN mkdir /docker-entrypoint-initdb.d COPY docker_related_config.xml /etc/clickhouse-server/config.d/ COPY entrypoint.sh /entrypoint.sh - -RUN chmod +x \ - /entrypoint.sh \ - /bin/gosu +RUN chmod +x /entrypoint.sh EXPOSE 9000 8123 9009 VOLUME /var/lib/clickhouse diff --git a/docker/test/Dockerfile b/docker/test/Dockerfile index 
e727d2a3ecf..9809a36395d 100644 --- a/docker/test/Dockerfile +++ b/docker/test/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/" -ARG version=21.4.1.* +ARG version=21.7.1.* RUN apt-get update && \ apt-get install -y apt-transport-https dirmngr && \ diff --git a/docker/test/base/Dockerfile b/docker/test/base/Dockerfile index 158d2608f41..44b9d42d6a1 100644 --- a/docker/test/base/Dockerfile +++ b/docker/test/base/Dockerfile @@ -4,9 +4,8 @@ FROM ubuntu:20.04 ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11 RUN apt-get update \ - && apt-get install apt-utils ca-certificates lsb-release wget gnupg apt-transport-https \ + && apt-get install ca-certificates lsb-release wget gnupg apt-transport-https \ --yes --no-install-recommends --verbose-versions \ - && echo "nameserver 1.1.1.1" >> /etc/resolv.conf \ && export LLVM_PUBKEY_HASH="bda960a8da687a275a2078d43c111d66b1c6a893a3275271beedf266c1ff4a0cdecb429c7a5cccf9f486ea7aa43fd27f" \ && wget -nv -O /tmp/llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key \ && echo "${LLVM_PUBKEY_HASH} /tmp/llvm-snapshot.gpg.key" | sha384sum -c \ @@ -32,8 +31,7 @@ RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/1/dpkg-deb \ && chmod +x dpkg-deb \ && cp dpkg-deb /usr/bin -RUN echo "nameserver 1.1.1.1" >> /etc/resolv.conf \ - && apt-get update \ +RUN apt-get update \ && apt-get install \ clang-${LLVM_VERSION} \ debhelper \ @@ -53,13 +51,13 @@ RUN echo "nameserver 1.1.1.1" >> /etc/resolv.conf \ # Sanitizer options for services (clickhouse-server) RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 history_size=7'" >> /etc/environment; \ echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment; \ - echo "MSAN_OPTIONS='abort_on_error=1'" >> /etc/environment; \ + echo "MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'" >> /etc/environment; \ echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_suppressions.txt'" >> /etc/environment; \ ln -s /usr/lib/llvm-${LLVM_VERSION}/bin/llvm-symbolizer /usr/bin/llvm-symbolizer; # Sanitizer options for current shell (not current, but the one that will be spawned on "docker run") # (but w/o verbosity for TSAN, otherwise test.reference will not match) ENV TSAN_OPTIONS='halt_on_error=1 history_size=7' ENV UBSAN_OPTIONS='print_stacktrace=1' -ENV MSAN_OPTIONS='abort_on_error=1' +ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1' CMD sleep 1 diff --git a/docker/test/fasttest/Dockerfile b/docker/test/fasttest/Dockerfile index 64be52d8e30..2864f7fc4da 100644 --- a/docker/test/fasttest/Dockerfile +++ b/docker/test/fasttest/Dockerfile @@ -1,7 +1,7 @@ # docker build -t yandex/clickhouse-fasttest . 
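For context on the `MSAN_OPTIONS` change in the test-base image above: `poison_in_dtor=1` makes MemorySanitizer poison an object's storage when its destructor runs, so later reads are reported as use-after-destruction; it only takes effect if the binary was built with the matching instrumentation. A minimal sketch (the source file name is hypothetical):

```
# Build with MSan plus use-after-dtor instrumentation, then run with the
# runtime options this image sets; abort_on_error=1 stops at the first report.
clang++-11 -fsanitize=memory -fsanitize-memory-use-after-dtor -g use_after_dtor.cpp -o uad_test
MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1' ./uad_test
```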
FROM ubuntu:20.04 -ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=10 +ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11 RUN apt-get update \ && apt-get install ca-certificates lsb-release wget gnupg apt-transport-https \ @@ -43,20 +43,20 @@ RUN apt-get update \ clang-tidy-${LLVM_VERSION} \ cmake \ curl \ - lsof \ expect \ fakeroot \ - git \ gdb \ + git \ gperf \ lld-${LLVM_VERSION} \ llvm-${LLVM_VERSION} \ + lsof \ moreutils \ ninja-build \ psmisc \ python3 \ - python3-pip \ python3-lxml \ + python3-pip \ python3-requests \ python3-termcolor \ rename \ diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index bbd5443ffb6..fc73a0df0ee 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -8,6 +8,9 @@ trap 'kill $(jobs -pr) ||:' EXIT # that we can run the "everything else" stage from the cloned source. stage=${stage:-} +# Compiler version, normally set by Dockerfile +export LLVM_VERSION=${LLVM_VERSION:-11} + # A variable to pass additional flags to CMake. # Here we explicitly default it to nothing so that bash doesn't complain about # it being undefined. Also read it as array so that we can pass an empty list @@ -70,7 +73,7 @@ function start_server --path "$FASTTEST_DATA" --user_files_path "$FASTTEST_DATA/user_files" --top_level_domains_path "$FASTTEST_DATA/top_level_domains" - --test_keeper_server.log_storage_path "$FASTTEST_DATA/coordination" + --keeper_server.storage_path "$FASTTEST_DATA/coordination" ) clickhouse-server "${opts[@]}" &>> "$FASTTEST_OUTPUT/server.log" & server_pid=$! @@ -124,22 +127,26 @@ continue function clone_root { - git clone https://github.com/ClickHouse/ClickHouse.git -- "$FASTTEST_SOURCE" | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/clone_log.txt" + git clone --depth 1 https://github.com/ClickHouse/ClickHouse.git -- "$FASTTEST_SOURCE" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/clone_log.txt" ( cd "$FASTTEST_SOURCE" if [ "$PULL_REQUEST_NUMBER" != "0" ]; then - if git fetch origin "+refs/pull/$PULL_REQUEST_NUMBER/merge"; then + if git fetch --depth 1 origin "+refs/pull/$PULL_REQUEST_NUMBER/merge"; then git checkout FETCH_HEAD - echo 'Clonned merge head' + echo "Checked out pull/$PULL_REQUEST_NUMBER/merge ($(git rev-parse FETCH_HEAD))" else - git fetch origin "+refs/pull/$PULL_REQUEST_NUMBER/head" + git fetch --depth 1 origin "+refs/pull/$PULL_REQUEST_NUMBER/head" git checkout "$COMMIT_SHA" - echo 'Checked out to commit' + echo "Checked out nominal SHA $COMMIT_SHA for PR $PULL_REQUEST_NUMBER" fi else if [ -v COMMIT_SHA ]; then + git fetch --depth 1 origin "$COMMIT_SHA" git checkout "$COMMIT_SHA" + echo "Checked out nominal SHA $COMMIT_SHA for master" + else + echo "Using default repository head $(git rev-parse HEAD)" fi fi ) @@ -181,7 +188,7 @@ function clone_submodules ) git submodule sync - git submodule update --init --recursive "${SUBMODULES_TO_UPDATE[@]}" + git submodule update --depth 1 --init --recursive "${SUBMODULES_TO_UPDATE[@]}" git submodule foreach git reset --hard git submodule foreach git checkout @ -f git submodule foreach git clean -xfd @@ -215,7 +222,7 @@ function run_cmake ( cd "$FASTTEST_BUILD" - cmake "$FASTTEST_SOURCE" -DCMAKE_CXX_COMPILER=clang++-10 -DCMAKE_C_COMPILER=clang-10 "${CMAKE_LIBS_CONFIG[@]}" "${FASTTEST_CMAKE_FLAGS[@]}" | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/cmake_log.txt" + cmake "$FASTTEST_SOURCE" -DCMAKE_CXX_COMPILER="clang++-${LLVM_VERSION}" -DCMAKE_C_COMPILER="clang-${LLVM_VERSION}" "${CMAKE_LIBS_CONFIG[@]}" "${FASTTEST_CMAKE_FLAGS[@]}" 2>&1 | ts '%Y-%m-%d 
%H:%M:%S' | tee "$FASTTEST_OUTPUT/cmake_log.txt" ) } @@ -223,7 +230,7 @@ function build { ( cd "$FASTTEST_BUILD" - time ninja clickhouse-bundle | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/build_log.txt" + time ninja clickhouse-bundle 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/build_log.txt" if [ "$COPY_CLICKHOUSE_BINARY_TO_OUTPUT" -eq "1" ]; then cp programs/clickhouse "$FASTTEST_OUTPUT/clickhouse" fi @@ -293,6 +300,7 @@ function run_tests 01663_aes_msan # Depends on OpenSSL 01667_aes_args_check # Depends on OpenSSL 01776_decrypt_aead_size_check # Depends on OpenSSL + 01811_filter_by_null # Depends on OpenSSL 01281_unsucceeded_insert_select_queries_counter 01292_create_user 01294_lazy_database_concurrent @@ -300,10 +308,10 @@ function run_tests 01354_order_by_tuple_collate_const 01355_ilike 01411_bayesian_ab_testing - 01532_collate_in_low_cardinality - 01533_collate_in_nullable - 01542_collate_in_array - 01543_collate_in_tuple + 01798_uniq_theta_sketch + 01799_long_uniq_theta_sketch + collate + collation _orc_ arrow avro @@ -358,35 +366,25 @@ function run_tests # JSON functions 01666_blns + + # Requires postgresql-client + 01802_test_postgresql_protocol_with_row_policy + + # Depends on AWS + 01801_s3_cluster + + # Depends on LLVM JIT + 01072_nullable_jit + 01852_jit_if + 01865_jit_comparison_constant_result + 01871_merge_tree_compile_expressions ) - (time clickhouse-test --hung-check -j 8 --order=random --use-skip-list --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" -- "$FASTTEST_FOCUS" 2>&1 ||:) | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt" - - # substr is to remove semicolon after test name - readarray -t FAILED_TESTS < <(awk '/\[ FAIL|TIMEOUT|ERROR \]/ { print substr($3, 1, length($3)-1) }' "$FASTTEST_OUTPUT/test_log.txt" | tee "$FASTTEST_OUTPUT/failed-parallel-tests.txt") - - # We will rerun sequentially any tests that have failed during parallel run. - # They might have failed because there was some interference from other tests - # running concurrently. If they fail even in seqential mode, we will report them. - # FIXME All tests that require exclusive access to the server must be - # explicitly marked as `sequential`, and `clickhouse-test` must detect them and - # run them in a separate group after all other tests. This is faster and also - # explicit instead of guessing. - if [[ -n "${FAILED_TESTS[*]}" ]] - then - stop_server ||: - - # Clean the data so that there is no interference from the previous test run. - rm -rf "$FASTTEST_DATA"/{{meta,}data,user_files,coordination} ||: - - start_server - - echo "Going to run again: ${FAILED_TESTS[*]}" - - clickhouse-test --hung-check --order=random --no-long --testname --shard --zookeeper "${FAILED_TESTS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a "$FASTTEST_OUTPUT/test_log.txt" - else - echo "No failed tests" - fi + time clickhouse-test --hung-check -j 8 --order=random --use-skip-list \ + --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" \ + -- "$FASTTEST_FOCUS" 2>&1 \ + | ts '%Y-%m-%d %H:%M:%S' \ + | tee "$FASTTEST_OUTPUT/test_log.txt" } case "$stage" in @@ -420,7 +418,7 @@ case "$stage" in # See the compatibility hacks in `clone_root` stage above. Remove at the same time, # after Nov 1, 2020. 
cd "$FASTTEST_WORKSPACE" - clone_submodules | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/submodule_log.txt" + clone_submodules 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/submodule_log.txt" ;& "run_cmake") run_cmake @@ -431,7 +429,7 @@ case "$stage" in "configure") # The `install_log.txt` is also needed for compatibility with old CI task -- # if there is no log, it will decide that build failed. - configure | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/install_log.txt" + configure 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/install_log.txt" ;& "run_tests") run_tests diff --git a/docker/test/fuzzer/query-fuzzer-tweaks-users.xml b/docker/test/fuzzer/query-fuzzer-tweaks-users.xml index 1e82f137961..dd6b7467afc 100644 --- a/docker/test/fuzzer/query-fuzzer-tweaks-users.xml +++ b/docker/test/fuzzer/query-fuzzer-tweaks-users.xml @@ -14,11 +14,6 @@ 10G - - - - - diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index e21f9efae66..670fc9e58b3 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -56,24 +56,40 @@ function watchdog sleep 3600 echo "Fuzzing run has timed out" - killall clickhouse-client ||: for _ in {1..10} do - if ! pgrep -f clickhouse-client + # Only kill by pid the particular client that runs the fuzzing, or else + # we can kill some clickhouse-client processes this script starts later, + # e.g. for checking server liveness. + if ! kill $fuzzer_pid then break fi sleep 1 done - killall -9 clickhouse-client ||: + kill -9 -- $fuzzer_pid ||: +} + +function filter_exists +{ + local path + for path in "$@"; do + if [ -e "$path" ]; then + echo "$path" + else + echo "'$path' does not exists" >&2 + fi + done } function fuzz { # Obtain the list of newly added tests. They will be fuzzed in more extreme way than other tests. # Don't overwrite the NEW_TESTS_OPT so that it can be set from the environment. - NEW_TESTS="$(grep -P 'tests/queries/0_stateless/.*\.sql' ci-changed-files.txt | sed -r -e 's!^!ch/!' | sort -R)" + NEW_TESTS="$(sed -n 's!\(^tests/queries/0_stateless/.*\.sql\)$!ch/\1!p' ci-changed-files.txt | sort -R)" + # ci-changed-files.txt contains also files that has been deleted/renamed, filter them out. + NEW_TESTS="$(filter_exists $NEW_TESTS)" if [[ -n "$NEW_TESTS" ]] then NEW_TESTS_OPT="${NEW_TESTS_OPT:---interleave-queries-file ${NEW_TESTS}}" @@ -101,17 +117,49 @@ continue gdb -batch -command script.gdb -p "$(pidof clickhouse-server)" & - fuzzer_exit_code=0 # SC2012: Use find instead of ls to better handle non-alphanumeric filenames. They are all alphanumeric. # SC2046: Quote this to prevent word splitting. Actually I need word splitting. # shellcheck disable=SC2012,SC2046 clickhouse-client --query-fuzzer-runs=1000 --queries-file $(ls -1 ch/tests/queries/0_stateless/*.sql | sort -R) $NEW_TESTS_OPT \ > >(tail -n 100000 > fuzzer.log) \ - 2>&1 \ - || fuzzer_exit_code=$? + 2>&1 & + fuzzer_pid=$! + echo "Fuzzer pid is $fuzzer_pid" + # Start a watchdog that should kill the fuzzer on timeout. + # The shell won't kill the child sleep when we kill it, so we have to put it + # into a separate process group so that we can kill them all. + set -m + watchdog & + watchdog_pid=$! + set +m + # Check that the watchdog has started. + kill -0 $watchdog_pid + + # Wait for the fuzzer to complete. + # Note that the 'wait || ...' thing is required so that the script doesn't + # exit because of 'set -e' when 'wait' returns nonzero code. + fuzzer_exit_code=0 + wait "$fuzzer_pid" || fuzzer_exit_code=$? 
echo "Fuzzer exit code is $fuzzer_exit_code" + kill -- -$watchdog_pid ||: + + # If the server dies, most often the fuzzer returns code 210: connetion + # refused, and sometimes also code 32: attempt to read after eof. For + # simplicity, check again whether the server is accepting connections, using + # clickhouse-client. We don't check for existence of server process, because + # the process is still present while the server is terminating and not + # accepting the connections anymore. + if clickhouse-client --query "select 1 format Null" + then + server_died=0 + else + echo "Server live check returns $?" + server_died=1 + fi + + # Stop the server. clickhouse-client --query "select elapsed, query from system.processes" ||: killall clickhouse-server ||: for _ in {1..10} @@ -123,6 +171,41 @@ continue sleep 1 done killall -9 clickhouse-server ||: + + # Debug. + date + sleep 10 + jobs + pstree -aspgT + + # Make files with status and description we'll show for this check on Github. + task_exit_code=$fuzzer_exit_code + if [ "$server_died" == 1 ] + then + # The server has died. + task_exit_code=210 + echo "failure" > status.txt + if ! grep -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*\|SUMMARY: AddressSanitizer:.*\|SUMMARY: MemorySanitizer:.*\|SUMMARY: ThreadSanitizer:.*\|.*_LIBCPP_ASSERT.*" server.log > description.txt + then + echo "Lost connection to server. See the logs." > description.txt + fi + elif [ "$fuzzer_exit_code" == "143" ] || [ "$fuzzer_exit_code" == "0" ] + then + # Variants of a normal run: + # 0 -- fuzzing ended earlier than timeout. + # 143 -- SIGTERM -- the fuzzer was killed by timeout. + task_exit_code=0 + echo "success" > status.txt + echo "OK" > description.txt + else + # The server was alive, but the fuzzer returned some error. Probably this + # is a problem in the fuzzer itself. Don't grep the server log in this + # case, because we will find a message about normal server termination + # (Received signal 15), which is confusing. + task_exit_code=$fuzzer_exit_code + echo "failure" > status.txt + echo "Fuzzer failed ($fuzzer_exit_code). See the logs." > description.txt + fi } case "$stage" in @@ -151,50 +234,7 @@ case "$stage" in time configure ;& "fuzz") - # Start a watchdog that should kill the fuzzer on timeout. - # The shell won't kill the child sleep when we kill it, so we have to put it - # into a separate process group so that we can kill them all. - set -m - watchdog & - watchdog_pid=$! - set +m - # Check that the watchdog has started - kill -0 $watchdog_pid - - fuzzer_exit_code=0 - time fuzz || fuzzer_exit_code=$? - kill -- -$watchdog_pid ||: - - # Debug - date - sleep 10 - jobs - pstree -aspgT - - # Make files with status and description we'll show for this check on Github - task_exit_code=$fuzzer_exit_code - if [ "$fuzzer_exit_code" == 143 ] - then - # SIGTERM -- the fuzzer was killed by timeout, which means a normal run. - echo "success" > status.txt - echo "OK" > description.txt - task_exit_code=0 - elif [ "$fuzzer_exit_code" == 210 ] - then - # Lost connection to the server. This probably means that the server died - # with abort. - echo "failure" > status.txt - if ! grep -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*\|SUMMARY: MemorySanitizer:.*\|SUMMARY: ThreadSanitizer:.*\|.*_LIBCPP_ASSERT.*" server.log > description.txt - then - echo "Lost connection to server. See the logs." 
> description.txt - fi - else - # Something different -- maybe the fuzzer itself died? Don't grep the - # server log in this case, because we will find a message about normal - # server termination (Received signal 15), which is confusing. - echo "failure" > status.txt - echo "Fuzzer failed ($fuzzer_exit_code). See the logs." > description.txt - fi + time fuzz ;& "report") cat > report.html < /etc/docker/daemon.json << EOF +{ + "ipv6": true, + "fixed-cidr-v6": "fd00::/8", + "ip-forward": true, + "insecure-registries" : ["dockerhub-proxy.sas.yp-c.yandex.net:5000"], + "registry-mirrors" : ["http://dockerhub-proxy.sas.yp-c.yandex.net:5000"] +} +EOF + dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 &>/var/log/somefile & set +e @@ -16,11 +27,16 @@ while true; do done set -e +# cleanup for retry run if volume is not recreated +docker kill "$(docker ps -aq)" || true +docker rm "$(docker ps -aq)" || true + echo "Start tests" export CLICKHOUSE_TESTS_SERVER_BIN_PATH=/clickhouse export CLICKHOUSE_TESTS_CLIENT_BIN_PATH=/clickhouse export CLICKHOUSE_TESTS_BASE_CONFIG_DIR=/clickhouse-config export CLICKHOUSE_ODBC_BRIDGE_BINARY_PATH=/clickhouse-odbc-bridge +export CLICKHOUSE_LIBRARY_BRIDGE_BINARY_PATH=/clickhouse-library-bridge export DOCKER_MYSQL_GOLANG_CLIENT_TAG=${DOCKER_MYSQL_GOLANG_CLIENT_TAG:=latest} export DOCKER_MYSQL_JAVA_CLIENT_TAG=${DOCKER_MYSQL_JAVA_CLIENT_TAG:=latest} diff --git a/docker/test/keeper-jepsen/Dockerfile b/docker/test/keeper-jepsen/Dockerfile new file mode 100644 index 00000000000..1a62d5e793f --- /dev/null +++ b/docker/test/keeper-jepsen/Dockerfile @@ -0,0 +1,39 @@ +# docker build -t yandex/clickhouse-keeper-jepsen-test . +FROM yandex/clickhouse-test-base + +ENV DEBIAN_FRONTEND=noninteractive +ENV CLOJURE_VERSION=1.10.3.814 + +# arguments +ENV PR_TO_TEST="" +ENV SHA_TO_TEST="" + +ENV NODES_USERNAME="root" +ENV NODES_PASSWORD="" +ENV TESTS_TO_RUN="30" +ENV TIME_LIMIT="30" + + +# volumes +ENV NODES_FILE_PATH="/nodes.txt" +ENV TEST_OUTPUT="/test_output" + +RUN mkdir "/root/.ssh" +RUN touch "/root/.ssh/known_hosts" + +# install java +RUN apt-get update && apt-get install default-jre default-jdk libjna-java libjna-jni ssh gnuplot graphviz --yes --no-install-recommends + +# install clojure +RUN curl -O "https://download.clojure.org/install/linux-install-${CLOJURE_VERSION}.sh" && \ + chmod +x "linux-install-${CLOJURE_VERSION}.sh" && \ + bash "./linux-install-${CLOJURE_VERSION}.sh" + +# install leiningen +RUN curl -O "https://raw.githubusercontent.com/technomancy/leiningen/stable/bin/lein" && \ + chmod +x ./lein && \ + mv ./lein /usr/bin + +COPY run.sh / + +CMD ["/bin/bash", "/run.sh"] diff --git a/docker/test/keeper-jepsen/run.sh b/docker/test/keeper-jepsen/run.sh new file mode 100644 index 00000000000..352585e16e3 --- /dev/null +++ b/docker/test/keeper-jepsen/run.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +set -euo pipefail + + +CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-11_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"} +CLICKHOUSE_REPO_PATH=${CLICKHOUSE_REPO_PATH:=""} + + +if [ -z "$CLICKHOUSE_REPO_PATH" ]; then + CLICKHOUSE_REPO_PATH=ch + rm -rf ch ||: + mkdir ch ||: + wget -nv -nd -c "https://clickhouse-test-reports.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/repo/clickhouse_no_subs.tar.gz" + tar -C ch --strip-components=1 -xf clickhouse_no_subs.tar.gz + ls -lath ||: +fi + +cd "$CLICKHOUSE_REPO_PATH/tests/jepsen.clickhouse-keeper" + +(lein run 
test-all --nodes-file "$NODES_FILE_PATH" --username "$NODES_USERNAME" --logging-json --password "$NODES_PASSWORD" --time-limit "$TIME_LIMIT" --concurrency 50 -r 50 --snapshot-distance 100 --stale-log-gap 100 --reserved-log-items 10 --lightweight-run --clickhouse-source "$CLICKHOUSE_PACKAGE" -q --test-count "$TESTS_TO_RUN" || true) | tee "$TEST_OUTPUT/jepsen_run_all_tests.log"
+
+mv store "$TEST_OUTPUT/"
diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh
index 4507de16492..a027a94ab70 100755
--- a/docker/test/performance-comparison/compare.sh
+++ b/docker/test/performance-comparison/compare.sh
@@ -243,9 +243,12 @@ function run_tests
     profile_seconds_left=600

     # Run the tests.
+    total_tests=$(echo "$test_files" | wc -w)
+    current_test=0
     test_name=""
     for test in $test_files
     do
+        echo "$current_test of $total_tests tests complete" > status.txt
         # Check that both servers are alive, and restart them if they die.
         clickhouse-client --port $LEFT_SERVER_PORT --query "select 1 format Null" \
             || { echo $test_name >> left-server-died.log ; restart ; }
@@ -273,6 +276,7 @@ function run_tests
         profile_seconds_left=$(awk -F' ' \
             'BEGIN { s = '$profile_seconds_left'; } /^profile-total/ { s -= $2 } END { print s }' \
             "$test_name-raw.tsv")
+        current_test=$((current_test + 1))
     done

     unset TIMEFORMAT
@@ -548,6 +552,63 @@ create table query_metric_stats_denorm engine File(TSVWithNamesAndTypes,
     order by test, query_index, metric_name
     ;
 " 2> >(tee -a analyze/errors.log 1>&2)
+
+# Fetch historical query variability thresholds from the CI database
+if [ -v CHPC_DATABASE_URL ]
+then
+    set +x # Don't show password in the log
+    client=(clickhouse-client
+        # Surprisingly, clickhouse-client doesn't understand --host 127.0.0.1:9000
+        # so I have to extract host and port with clickhouse-local. I tried to use
+        # Poco URI parser to support this in the client, but it's broken and can't
+        # parse host:port.
+        $(clickhouse-local --query "with '${CHPC_DATABASE_URL}' as url select '--host ' || domain(url) || ' --port ' || toString(port(url)) format TSV")
+        --secure
+        --user "${CHPC_DATABASE_USER}"
+        --password "${CHPC_DATABASE_PASSWORD}"
+        --config "right/config/client_config.xml"
+        --database perftest
+        --date_time_input_format=best_effort)
+
+    # Precision is going to be 1.5 times worse for PRs. How do I know it? I ran this:
+    # SELECT quantilesExact(0., 0.1, 0.5, 0.75, 0.95, 1.)(p / m)
+    # FROM
+    # (
+    #     SELECT
+    #         quantileIf(0.95)(stat_threshold, pr_number = 0) AS m,
+    #         quantileIf(0.95)(stat_threshold, (pr_number != 0) AND (abs(diff) < stat_threshold)) AS p
+    #     FROM query_metrics_v2
+    #     WHERE (event_date > (today() - toIntervalMonth(1))) AND (metric = 'client_time')
+    #     GROUP BY
+    #         test,
+    #         query_index,
+    #         query_display_name
+    #     HAVING count(*) > 100
+    # )
+    # The file can be empty if the server is inaccessible, so we can't use TSVWithNamesAndTypes.
+ "${client[@]}" --query " + select test, query_index, + quantileExact(0.99)(abs(diff)) max_diff, + quantileExactIf(0.99)(stat_threshold, abs(diff) < stat_threshold) * 1.5 max_stat_threshold, + query_display_name + from query_metrics_v2 + where event_date > now() - interval 1 month + and metric = 'client_time' + and pr_number = 0 + group by test, query_index, query_display_name + having count(*) > 100 + " > analyze/historical-thresholds.tsv +else + touch analyze/historical-thresholds.tsv +fi + } # Analyze results @@ -592,6 +653,26 @@ create view query_metric_stats as diff float, stat_threshold float') ; +create table report_thresholds engine File(TSVWithNamesAndTypes, 'report/thresholds.tsv') + as select + query_display_names.test test, query_display_names.query_index query_index, + ceil(greatest(0.1, historical_thresholds.max_diff, + test_thresholds.report_threshold), 2) changed_threshold, + ceil(greatest(0.2, historical_thresholds.max_stat_threshold, + test_thresholds.report_threshold + 0.1), 2) unstable_threshold, + query_display_names.query_display_name query_display_name + from query_display_names + left join file('analyze/historical-thresholds.tsv', TSV, + 'test text, query_index int, max_diff float, max_stat_threshold float, + query_display_name text') historical_thresholds + on query_display_names.test = historical_thresholds.test + and query_display_names.query_index = historical_thresholds.query_index + and query_display_names.query_display_name = historical_thresholds.query_display_name + left join file('analyze/report-thresholds.tsv', TSV, + 'test text, report_threshold float') test_thresholds + on query_display_names.test = test_thresholds.test + ; + -- Main statistics for queries -- query time as reported in query log. create table queries engine File(TSVWithNamesAndTypes, 'report/queries.tsv') as select @@ -606,23 +687,23 @@ create table queries engine File(TSVWithNamesAndTypes, 'report/queries.tsv') -- uncaught regressions, because for the default 7 runs we do for PRs, -- the randomization distribution has only 16 values, so the max quantile -- is actually 0.9375. 
- abs(diff) > report_threshold and abs(diff) >= stat_threshold as changed_fail, - abs(diff) > report_threshold - 0.05 and abs(diff) >= stat_threshold as changed_show, + abs(diff) > changed_threshold and abs(diff) >= stat_threshold as changed_fail, + abs(diff) > changed_threshold - 0.05 and abs(diff) >= stat_threshold as changed_show, - not changed_fail and stat_threshold > report_threshold + 0.10 as unstable_fail, - not changed_show and stat_threshold > report_threshold - 0.05 as unstable_show, + not changed_fail and stat_threshold > unstable_threshold as unstable_fail, + not changed_show and stat_threshold > unstable_threshold - 0.05 as unstable_show, left, right, diff, stat_threshold, - if(report_threshold > 0, report_threshold, 0.10) as report_threshold, query_metric_stats.test test, query_metric_stats.query_index query_index, - query_display_name + query_display_names.query_display_name query_display_name from query_metric_stats - left join file('analyze/report-thresholds.tsv', TSV, - 'test text, report_threshold float') thresholds - on query_metric_stats.test = thresholds.test left join query_display_names on query_metric_stats.test = query_display_names.test and query_metric_stats.query_index = query_display_names.query_index + left join report_thresholds + on query_display_names.test = report_thresholds.test + and query_display_names.query_index = report_thresholds.query_index + and query_display_names.query_display_name = report_thresholds.query_display_name -- 'server_time' is rounded down to ms, which might be bad for very short queries. -- Use 'client_time' instead. where metric_name = 'client_time' @@ -885,7 +966,6 @@ create table all_query_metrics_tsv engine File(TSV, 'report/all-query-metrics.ts order by test, query_index; " 2> >(tee -a report/errors.log 1>&2) - # Prepare source data for metrics and flamegraphs for queries that were profiled # by perf.py. for version in {right,left} @@ -1015,6 +1095,7 @@ done wait # Create per-query flamegraphs +touch report/query-files.txt IFS=$'\n' for version in {right,left} do @@ -1149,20 +1230,21 @@ function upload_results return 0 fi - # Surprisingly, clickhouse-client doesn't understand --host 127.0.0.1:9000 - # so I have to extract host and port with clickhouse-local. I tried to use - # Poco URI parser to support this in the client, but it's broken and can't - # parse host:port. set +x # Don't show password in the log - clickhouse-client \ - $(clickhouse-local --query "with '${CHPC_DATABASE_URL}' as url select '--host ' || domain(url) || ' --port ' || toString(port(url)) format TSV") \ - --secure \ - --user "${CHPC_DATABASE_USER}" \ - --password "${CHPC_DATABASE_PASSWORD}" \ - --config "right/config/client_config.xml" \ - --database perftest \ - --date_time_input_format=best_effort \ - --query " + client=(clickhouse-client + # Surprisingly, clickhouse-client doesn't understand --host 127.0.0.1:9000 + # so I have to extract host and port with clickhouse-local. I tried to use + # Poco URI parser to support this in the client, but it's broken and can't + # parse host:port. 
+ $(clickhouse-local --query "with '${CHPC_DATABASE_URL}' as url select '--host ' || domain(url) || ' --port ' || toString(port(url)) format TSV") + --secure + --user "${CHPC_DATABASE_USER}" + --password "${CHPC_DATABASE_PASSWORD}" + --config "right/config/client_config.xml" + --database perftest + --date_time_input_format=best_effort) + + "${client[@]}" --query " insert into query_metrics_v2 select toDate(event_time) event_date, @@ -1185,6 +1267,31 @@ function upload_results format TSV settings date_time_input_format='best_effort' " < report/all-query-metrics.tsv # Don't leave whitespace after INSERT: https://github.com/ClickHouse/ClickHouse/issues/16652 + + # Upload some run attributes. I use this weird form because it is the same + # form that can be used for historical data when you only have compare.log. + cat compare.log \ + | sed -n ' + s/.*Model name:[[:space:]]\+\(.*\)$/metric lscpu-model-name \1/p; + s/.*L1d cache:[[:space:]]\+\(.*\)$/metric lscpu-l1d-cache \1/p; + s/.*L1i cache:[[:space:]]\+\(.*\)$/metric lscpu-l1i-cache \1/p; + s/.*L2 cache:[[:space:]]\+\(.*\)$/metric lscpu-l2-cache \1/p; + s/.*L3 cache:[[:space:]]\+\(.*\)$/metric lscpu-l3-cache \1/p; + s/.*left_sha=\(.*\)$/old-sha \1/p; + s/.*right_sha=\(.*\)/new-sha \1/p' \ + | awk ' + BEGIN { FS = "\t"; OFS = "\t" } + /^old-sha/ { old_sha=$2 } + /^new-sha/ { new_sha=$2 } + /^metric/ { print old_sha, new_sha, $2, $3 }' \ + | "${client[@]}" --query "INSERT INTO run_attributes_v1 FORMAT TSV" + + # Grepping numactl results from log is too crazy, I'll just call it again. + "${client[@]}" --query "INSERT INTO run_attributes_v1 FORMAT TSV" < + - + :: diff --git a/docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml b/docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml index 41bc7f777bf..2c06be9bb91 100644 --- a/docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml +++ b/docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml @@ -17,6 +17,12 @@ 12 + + + 64Mi + + + 0 diff --git a/docker/test/performance-comparison/perf.py b/docker/test/performance-comparison/perf.py index 4727f485943..9628c512e83 100755 --- a/docker/test/performance-comparison/perf.py +++ b/docker/test/performance-comparison/perf.py @@ -44,7 +44,7 @@ parser.add_argument('--port', nargs='*', default=[9000], help="Space-separated l parser.add_argument('--runs', type=int, default=1, help='Number of query runs per server.') parser.add_argument('--max-queries', type=int, default=None, help='Test no more than this number of queries, chosen at random.') parser.add_argument('--queries-to-run', nargs='*', type=int, default=None, help='Space-separated list of indexes of queries to test.') -parser.add_argument('--max-query-seconds', type=int, default=10, help='For how many seconds at most a query is allowed to run. The script finishes with error if this time is exceeded.') +parser.add_argument('--max-query-seconds', type=int, default=15, help='For how many seconds at most a query is allowed to run. 
The script finishes with error if this time is exceeded.') parser.add_argument('--profile-seconds', type=int, default=0, help='For how many seconds to profile a query for which the performance has changed.') parser.add_argument('--long', action='store_true', help='Do not skip the tests tagged as long.') parser.add_argument('--print-queries', action='store_true', help='Print test queries and exit.') @@ -66,7 +66,12 @@ reportStageEnd('parse') subst_elems = root.findall('substitutions/substitution') available_parameters = {} # { 'table': ['hits_10m', 'hits_100m'], ... } for e in subst_elems: - available_parameters[e.find('name').text] = [v.text for v in e.findall('values/value')] + name = e.find('name').text + values = [v.text for v in e.findall('values/value')] + if not values: + raise Exception(f'No values given for substitution {{{name}}}') + + available_parameters[name] = values # Takes parallel lists of templates, substitutes them with all combos of # parameters. The set of parameters is determined based on the first list. @@ -76,7 +81,10 @@ def substitute_parameters(query_templates, other_templates = []): query_results = [] other_results = [[]] * (len(other_templates)) for i, q in enumerate(query_templates): - keys = set(n for _, n, _, _ in string.Formatter().parse(q) if n) + # We need stable order of keys here, so that the order of substitutions + # is always the same, and the query indexes are consistent across test + # runs. + keys = sorted(set(n for _, n, _, _ in string.Formatter().parse(q) if n)) values = [available_parameters[k] for k in keys] combos = itertools.product(*values) for c in combos: @@ -265,8 +273,14 @@ for query_index in queries_to_run: prewarm_id = f'{query_prefix}.prewarm0' try: - # Will also detect too long queries during warmup stage - res = c.execute(q, query_id = prewarm_id, settings = {'max_execution_time': args.max_query_seconds}) + # During the warmup runs, we will also: + # * detect queries that are exceedingly long, to fail fast, + # * collect profiler traces, which might be helpful for analyzing + # test coverage. We disable profiler for normal runs because + # it makes the results unstable. + res = c.execute(q, query_id = prewarm_id, + settings = {'max_execution_time': args.max_query_seconds, + 'query_profiler_real_time_period_ns': 10000000}) except clickhouse_driver.errors.Error as e: # Add query id to the exception to make debugging easier. e.args = (prewarm_id, *e.args) @@ -351,10 +365,11 @@ for query_index in queries_to_run: # For very short queries we have a special mode where we run them for at # least some time. The recommended lower bound of run time for "normal" # queries is about 0.1 s, and we run them about 10 times, giving the - # time per query per server of about one second. Use this value as a - # reference for "short" queries. + # time per query per server of about one second. Run "short" queries + # for longer time, because they have a high percentage of overhead and + # might give less stable results. if is_short[query_index]: - if server_seconds >= 2 * len(this_query_connections): + if server_seconds >= 8 * len(this_query_connections): break # Also limit the number of runs, so that we don't go crazy processing # the results -- 'eqmed.sql' is really suboptimal. 
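The warmup change above also spells out the per-query settings perf.py now applies during the prewarm run. A rough shell equivalent, handy for reproducing a single test query by hand, is sketched below (this is an assumption for illustration, not part of the patch; it presumes a server on localhost and the query is just a placeholder):

```bash
# Mirror the prewarm settings from perf.py: fail fast after 15 seconds and
# sample real-time profiler traces every 10 ms (10000000 ns). The profiler
# stays off for the measured runs, since it makes the results unstable.
clickhouse-client \
    --max_execution_time 15 \
    --query_profiler_real_time_period_ns 10000000 \
    --query "SELECT count() FROM numbers(100000000)"
```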
diff --git a/docker/test/performance-comparison/report.py b/docker/test/performance-comparison/report.py index 9d3ccabb788..dabf6b7b93d 100755 --- a/docker/test/performance-comparison/report.py +++ b/docker/test/performance-comparison/report.py @@ -446,11 +446,17 @@ if args.report == 'main': attrs[3] = f'style="background: {color_bad}"' else: attrs[3] = '' + # Just don't add the slightly unstable queries we don't consider + # errors. It's not clear what the user should do with them. + continue text += tableRow(r, attrs, anchor) text += tableEnd() - tables.append(text) + + # Don't add an empty table. + if very_unstable_queries: + tables.append(text) add_unstable_queries() @@ -520,12 +526,13 @@ if args.report == 'main': for t in tables: print(t) - print(""" + print(f""" @@ -548,16 +555,15 @@ if args.report == 'main': message_array.append(str(slower_queries) + ' slower') if unstable_partial_queries: - unstable_queries += unstable_partial_queries - error_tests += unstable_partial_queries + very_unstable_queries += unstable_partial_queries status = 'failure' - if unstable_queries: - message_array.append(str(unstable_queries) + ' unstable') - -# Disabled before fix. -# if very_unstable_queries: -# status = 'failure' + # Don't show mildly unstable queries, only the very unstable ones we + # treat as errors. + if very_unstable_queries: + error_tests += very_unstable_queries + status = 'failure' + message_array.append(str(very_unstable_queries) + ' unstable') error_tests += slow_average_tests if error_tests: @@ -638,12 +644,13 @@ elif args.report == 'all-queries': for t in tables: print(t) - print(""" + print(f""" diff --git a/docker/test/pvs/Dockerfile b/docker/test/pvs/Dockerfile index 382b486dda3..2983be2305f 100644 --- a/docker/test/pvs/Dockerfile +++ b/docker/test/pvs/Dockerfile @@ -41,6 +41,6 @@ CMD echo "Running PVS version $PKG_VERSION" && cd /repo_folder && pvs-studio-ana && cmake . -D"ENABLE_EMBEDDED_COMPILER"=OFF -D"USE_INTERNAL_PROTOBUF_LIBRARY"=OFF -D"USE_INTERNAL_GRPC_LIBRARY"=OFF \ && ninja re2_st clickhouse_grpc_protos \ && pvs-studio-analyzer analyze -o pvs-studio.log -e contrib -j 4 -l ./licence.lic; \ + cp /repo_folder/pvs-studio.log /test_output; \ plog-converter -a GA:1,2 -t fullhtml -o /test_output/pvs-studio-html-report pvs-studio.log; \ plog-converter -a GA:1,2 -t tasklist -o /test_output/pvs-studio-task-report.txt pvs-studio.log - diff --git a/docker/test/stateful/run.sh b/docker/test/stateful/run.sh index 6b90a9e7e37..8d865431570 100755 --- a/docker/test/stateful/run.sh +++ b/docker/test/stateful/run.sh @@ -13,6 +13,25 @@ dpkg -i package_folder/clickhouse-test_*.deb function start() { + if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then + # NOTE We run "clickhouse server" instead of "clickhouse-server" + # to make "pidof clickhouse-server" return single pid of the main instance. 
+    # We will run the main instance using "service clickhouse-server start"
+    sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server1/config.xml --daemon \
+        -- --path /var/lib/clickhouse1/ --logger.stderr /var/log/clickhouse-server/stderr1.log \
+        --logger.log /var/log/clickhouse-server/clickhouse-server1.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server1.err.log \
+        --tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \
+        --mysql_port 19004 --postgresql_port 19005 \
+        --keeper_server.tcp_port 19181 --keeper_server.server_id 2
+
+    sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server2/config.xml --daemon \
+        -- --path /var/lib/clickhouse2/ --logger.stderr /var/log/clickhouse-server/stderr2.log \
+        --logger.log /var/log/clickhouse-server/clickhouse-server2.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server2.err.log \
+        --tcp_port 29000 --tcp_port_secure 29440 --http_port 28123 --https_port 28443 --interserver_http_port 29009 --tcp_with_proxy_port 29010 \
+        --mysql_port 29004 --postgresql_port 29005 \
+        --keeper_server.tcp_port 29181 --keeper_server.server_id 3
+    fi
+
     counter=0
     until clickhouse-client --query "SELECT 1"
     do
@@ -35,9 +54,8 @@ start
 /s3downloader --dataset-names $DATASETS
 chmod 777 -R /var/lib/clickhouse
 clickhouse-client --query "SHOW DATABASES"
-clickhouse-client --query "ATTACH DATABASE datasets ENGINE = Ordinary"
-clickhouse-client --query "CREATE DATABASE test"
+clickhouse-client --query "ATTACH DATABASE datasets ENGINE = Ordinary"

 service clickhouse-server restart

 # Wait for server to start accepting connections
@@ -47,24 +65,50 @@ for _ in {1..120}; do
 done

 clickhouse-client --query "SHOW TABLES FROM datasets"
-clickhouse-client --query "SHOW TABLES FROM test"
-clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
-clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
-clickhouse-client --query "SHOW TABLES FROM test"
-
-if grep -q -- "--use-skip-list" /usr/bin/clickhouse-test ; then
-    SKIP_LIST_OPT="--use-skip-list"
-fi
-
-# We can have several additional options so we path them as array because it's
-# more idiologically correct.
-read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}"

 if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
-    ADDITIONAL_OPTIONS+=('--replicated-database')
+    clickhouse-client --query "CREATE DATABASE test ON CLUSTER 'test_cluster_database_replicated'
+        ENGINE=Replicated('/test/clickhouse/db/test', '{shard}', '{replica}')"
+
+    clickhouse-client --query "CREATE TABLE test.hits AS datasets.hits_v1"
+    clickhouse-client --query "CREATE TABLE test.visits AS datasets.visits_v1"
+
+    clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1"
+    clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1"
+
+    clickhouse-client --query "DROP TABLE datasets.hits_v1"
+    clickhouse-client --query "DROP TABLE datasets.visits_v1"
+
+    MAX_RUN_TIME=$((MAX_RUN_TIME < 9000 ? MAX_RUN_TIME : 9000)) # min(MAX_RUN_TIME, 2.5 hours)
+    MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited)
+else
+    clickhouse-client --query "CREATE DATABASE test"
+    clickhouse-client --query "SHOW TABLES FROM test"
+    clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
+    clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
 fi

-clickhouse-test --testname --shard --zookeeper --no-stateless --hung-check --print-time "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
+clickhouse-client --query "SHOW TABLES FROM test"
+clickhouse-client --query "SELECT count() FROM test.hits"
+clickhouse-client --query "SELECT count() FROM test.visits"
+
+function run_tests()
+{
+    set -x
+    # We can have several additional options so we pass them as an array because it's
+    # more ideologically correct.
+    read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}"
+
+    if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
+        ADDITIONAL_OPTIONS+=('--replicated-database')
+    fi
+
+    clickhouse-test --testname --shard --zookeeper --no-stateless --hung-check --use-skip-list --print-time "${ADDITIONAL_OPTIONS[@]}" \
+        "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
+}
+
+export -f run_tests
+timeout "$MAX_RUN_TIME" bash -c run_tests ||:

 ./process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
@@ -73,3 +117,9 @@ mv /var/log/clickhouse-server/stderr.log /test_output/ ||:
 if [[ -n "$WITH_COVERAGE" ]] && [[ "$WITH_COVERAGE" -eq 1 ]]; then
     tar -chf /test_output/clickhouse_coverage.tar.gz /profraw ||:
 fi
+if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
+    pigz < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.gz ||:
+    pigz < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.gz ||:
+    mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
+    mv /var/log/clickhouse-server/stderr2.log /test_output/ ||:
+fi
diff --git a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile
index 61d1b2f4849..658ae1f27ba 100644
--- a/docker/test/stateless/Dockerfile
+++ b/docker/test/stateless/Dockerfile
@@ -28,7 +28,8 @@ RUN apt-get update -y \
     tree \
     unixodbc \
     wget \
-    mysql-client=5.7*
+    mysql-client=5.7* \
+    postgresql-client

 RUN pip3 install numpy scipy pandas
diff --git a/docker/test/stateless/process_functional_tests_result.py b/docker/test/stateless/process_functional_tests_result.py
index 27210ef9b80..02adf108212 100755
--- a/docker/test/stateless/process_functional_tests_result.py
+++ b/docker/test/stateless/process_functional_tests_result.py
@@ -12,6 +12,8 @@ UNKNOWN_SIGN = "[ UNKNOWN "
 SKIPPED_SIGN = "[ SKIPPED "
 HUNG_SIGN = "Found hung queries in processlist"

+NO_TASK_TIMEOUT_SIGN = "All tests have finished"
+
 def process_test_log(log_path):
     total = 0
     skipped = 0
@@ -19,10 +21,13 @@
     failed = 0
     success = 0
     hung = False
+    task_timeout = True
     test_results = []
     with open(log_path, 'r') as test_file:
         for line in test_file:
             line = line.strip()
+            if NO_TASK_TIMEOUT_SIGN in line:
+                task_timeout = False
             if HUNG_SIGN in line:
                 hung = True
             if any(sign in line for sign in (OK_SIGN, FAIL_SING, UNKNOWN_SIGN, SKIPPED_SIGN)):
@@ -52,7 +57,7 @@
             else:
                 success += int(OK_SIGN in line)
                 test_results.append((test_name, "OK", test_time))
-    return total, skipped, unknown, failed,
success, hung, test_results + return total, skipped, unknown, failed, success, hung, task_timeout, test_results def process_result(result_path): test_results = [] @@ -68,7 +73,7 @@ def process_result(result_path): state = "error" if result_path and os.path.exists(result_path): - total, skipped, unknown, failed, success, hung, test_results = process_test_log(result_path) + total, skipped, unknown, failed, success, hung, task_timeout, test_results = process_test_log(result_path) is_flacky_check = 1 < int(os.environ.get('NUM_TRIES', 1)) # If no tests were run (success == 0) it indicates an error (e.g. server did not start or crashed immediately) # But it's Ok for "flaky checks" - they can contain just one test for check which is marked as skipped. @@ -78,6 +83,9 @@ def process_result(result_path): if hung: description = "Some queries hung, " state = "failure" + elif task_timeout: + description = "Timeout, " + state = "failure" else: description = "" diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 3119ae27c59..8440b1548a5 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -34,36 +34,61 @@ if [ "$NUM_TRIES" -gt "1" ]; then # simpliest way to forward env variables to server sudo -E -u clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml --daemon - sleep 5 else - service clickhouse-server start && sleep 5 + service clickhouse-server start fi -if grep -q -- "--use-skip-list" /usr/bin/clickhouse-test; then - SKIP_LIST_OPT="--use-skip-list" +if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then + + sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server1/config.xml --daemon \ + -- --path /var/lib/clickhouse1/ --logger.stderr /var/log/clickhouse-server/stderr1.log \ + --logger.log /var/log/clickhouse-server/clickhouse-server1.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server1.err.log \ + --tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \ + --mysql_port 19004 --postgresql_port 19005 \ + --keeper_server.tcp_port 19181 --keeper_server.server_id 2 \ + --macros.replica r2 # It doesn't work :( + + sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server2/config.xml --daemon \ + -- --path /var/lib/clickhouse2/ --logger.stderr /var/log/clickhouse-server/stderr2.log \ + --logger.log /var/log/clickhouse-server/clickhouse-server2.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server2.err.log \ + --tcp_port 29000 --tcp_port_secure 29440 --http_port 28123 --https_port 28443 --interserver_http_port 29009 --tcp_with_proxy_port 29010 \ + --mysql_port 29004 --postgresql_port 29005 \ + --keeper_server.tcp_port 29181 --keeper_server.server_id 3 \ + --macros.shard s2 # It doesn't work :( + + MAX_RUN_TIME=$((MAX_RUN_TIME < 9000 ? MAX_RUN_TIME : 9000)) # min(MAX_RUN_TIME, 2.5 hours) + MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited) fi +sleep 5 + function run_tests() { + set -x # We can have several additional options so we path them as array because it's # more idiologically correct. 
     read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}"

     # Skip these tests, because they fail when we rerun them multiple times
     if [ "$NUM_TRIES" -gt "1" ]; then
+        ADDITIONAL_OPTIONS+=('--order=random')
         ADDITIONAL_OPTIONS+=('--skip')
         ADDITIONAL_OPTIONS+=('00000_no_tests_to_skip')
-        ADDITIONAL_OPTIONS+=('--jobs')
-        ADDITIONAL_OPTIONS+=('4')
+        # Note that flaky check must be run in parallel, but for now we run
+        # everything in parallel except DatabaseReplicated. See below.
     fi

     if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
         ADDITIONAL_OPTIONS+=('--replicated-database')
+    else
+        # Too many tests fail for DatabaseReplicated in parallel. All other
+        # configurations are OK.
+        ADDITIONAL_OPTIONS+=('--jobs')
+        ADDITIONAL_OPTIONS+=('8')
     fi

     clickhouse-test --testname --shard --zookeeper --hung-check --print-time \
-        --test-runs "$NUM_TRIES" \
-        "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
+        --use-skip-list --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
         | ts '%Y-%m-%d %H:%M:%S' \
         | tee -a test_output/test_result.txt
 }
@@ -74,10 +99,49 @@
 timeout "$MAX_RUN_TIME" bash -c run_tests ||:

 ./process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv

-pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz ||:
+clickhouse-client -q "system flush logs" ||:
+
+pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz &
+clickhouse-client -q "select * from system.query_log format TSVWithNamesAndTypes" | pigz > /test_output/query-log.tsv.gz &
+clickhouse-client -q "select * from system.query_thread_log format TSVWithNamesAndTypes" | pigz > /test_output/query-thread-log.tsv.gz &
+clickhouse-client --allow_introspection_functions=1 -q "
+    WITH
+        arrayMap(x -> concat(demangle(addressToSymbol(x)), ':', addressToLine(x)), trace) AS trace_array,
+        arrayStringConcat(trace_array, '\n') AS trace_string
+    SELECT * EXCEPT(trace), trace_string FROM system.trace_log FORMAT TSVWithNamesAndTypes
+" | pigz > /test_output/trace-log.tsv.gz &
+
+# Also export trace log in flamegraph-friendly format.
+for trace_type in CPU Memory Real
+do
+    clickhouse-client -q "
+        select
+            arrayStringConcat((arrayMap(x -> concat(splitByChar('/', addressToLine(x))[-1], '#', demangle(addressToSymbol(x)) ), trace)), ';') AS stack,
+            count(*) AS samples
+        from system.trace_log
+        where trace_type = '$trace_type'
+        group by trace
+        order by samples desc
+        settings allow_introspection_functions = 1
+        format TabSeparated" \
+    | pigz > "/test_output/trace-log-$trace_type-flamegraph.tsv.gz" &
+done
+
+wait ||:
+
 mv /var/log/clickhouse-server/stderr.log /test_output/ ||:
 if [[ -n "$WITH_COVERAGE" ]] && [[ "$WITH_COVERAGE" -eq 1 ]]; then
     tar -chf /test_output/clickhouse_coverage.tar.gz /profraw ||:
 fi
 tar -chf /test_output/text_log_dump.tar /var/lib/clickhouse/data/system/text_log ||:
 tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||:
+tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
+
+if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
+    pigz < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.gz ||:
+    pigz < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.gz ||:
+    mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
+    mv /var/log/clickhouse-server/stderr2.log /test_output/ ||:
+    tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination ||:
+    tar -chf /test_output/coordination2.tar /var/lib/clickhouse2/coordination ||:
+fi
diff --git a/docker/test/stateless_unbundled/Dockerfile b/docker/test/stateless_unbundled/Dockerfile
index 9efe08dbf23..c5463ac447d 100644
--- a/docker/test/stateless_unbundled/Dockerfile
+++ b/docker/test/stateless_unbundled/Dockerfile
@@ -14,9 +14,7 @@ RUN apt-get --allow-unauthenticated update -y \
     expect \
     gdb \
     gperf \
-    gperf \
     heimdal-multidev \
-    intel-opencl-icd \
     libboost-filesystem-dev \
     libboost-iostreams-dev \
     libboost-program-options-dev \
@@ -50,9 +48,7 @@
     moreutils \
     ncdu \
     netcat-openbsd \
-    ocl-icd-libopencl1 \
     odbcinst \
-    opencl-headers \
     openssl \
     perl \
     pigz \
diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh
index 3594eead992..43a92fdeebe 100755
--- a/docker/test/stress/run.sh
+++ b/docker/test/stress/run.sh
@@ -20,6 +20,14 @@ function configure()
     # since we run clickhouse from root
     sudo chown root: /var/lib/clickhouse
+
+    # Set a more frequent update period of asynchronous metrics, so that information
+    # about real memory usage is refreshed more often (less chance of OOM).
+    echo "<yandex><asynchronous_metrics_update_period_s>1</asynchronous_metrics_update_period_s></yandex>" \
+        > /etc/clickhouse-server/config.d/asynchronous_metrics_update_period_s.xml
+
+    # Set maximum memory usage as half of total memory (less chance of OOM).
+ echo "0.5" \ + > /etc/clickhouse-server/config.d/max_server_memory_usage_to_ram_ratio.xml } function stop() @@ -108,6 +116,11 @@ zgrep -Fav "ASan doesn't fully support makecontext/swapcontext functions" > /dev || echo -e 'No sanitizer asserts\tOK' >> /test_output/test_results.tsv rm -f /test_output/tmp +# OOM +zgrep -Fa " Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \ + && echo -e 'OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \ + || echo -e 'No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv + # Logical errors zgrep -Fa "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \ && echo -e 'Logical error thrown (see clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \ @@ -118,7 +131,7 @@ zgrep -Fa "########################################" /var/log/clickhouse-server/ && echo -e 'Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'Not crashed\tOK' >> /test_output/test_results.tsv -# It also checks for OOM or crash without stacktrace (printed by watchdog) +# It also checks for crash without stacktrace (printed by watchdog) zgrep -Fa " " /var/log/clickhouse-server/clickhouse-server.log > /dev/null \ && echo -e 'Fatal message in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv @@ -131,6 +144,7 @@ pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhous tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||: mv /var/log/clickhouse-server/stderr.log /test_output/ tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||: +tar -chf /test_output/trace_log_dump.tar /var/lib/clickhouse/data/system/trace_log ||: # Write check result into check_status.tsv clickhouse-local --structure "test String, res String" -q "SELECT 'failure', test FROM table WHERE res != 'OK' order by (lower(test) like '%hung%') LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv diff --git a/docker/test/stress/stress b/docker/test/stress/stress index 25a705ecbd1..4fbedceb0b8 100755 --- a/docker/test/stress/stress +++ b/docker/test/stress/stress @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- from multiprocessing import cpu_count -from subprocess import Popen, call, STDOUT +from subprocess import Popen, call, check_output, STDOUT import os import sys import shutil @@ -85,10 +85,27 @@ def prepare_for_hung_check(): # Issue #21004, live views are experimental, so let's just suppress it call("""clickhouse client -q "KILL QUERY WHERE upper(query) LIKE 'WATCH %'" """, shell=True, stderr=STDOUT) - # Wait for last queries to finish if any, not longer than 120 seconds + # Kill other queries which known to be slow + # It's query from 01232_preparing_sets_race_condition_long, it may take up to 1000 seconds in slow builds + call("""clickhouse client -q "KILL QUERY WHERE query LIKE 'insert into tableB select %'" """, shell=True, stderr=STDOUT) + # Long query from 00084_external_agregation + call("""clickhouse client -q "KILL QUERY WHERE query LIKE 'SELECT URL, uniq(SearchPhrase) AS u FROM test.hits GROUP BY URL ORDER BY u %'" """, shell=True, stderr=STDOUT) + + # Wait for last queries to finish if any, not longer than 300 seconds call("""clickhouse client -q "select sleepEachRow(( - 
select maxOrDefault(120 - elapsed) + 1 from system.processes where query not like '%from system.processes%' and elapsed < 120 - ) / 120) from numbers(120) format Null" """, shell=True, stderr=STDOUT) + select maxOrDefault(300 - elapsed) + 1 from system.processes where query not like '%from system.processes%' and elapsed < 300 + ) / 300) from numbers(300) format Null" """, shell=True, stderr=STDOUT) + + # Even if all clickhouse-test processes are finished, there are probably some sh scripts, + # which still run some new queries. Let's ignore them. + try: + query = """clickhouse client -q "SELECT count() FROM system.processes where where elapsed > 300" """ + output = check_output(query, shell=True, stderr=STDOUT).decode('utf-8').strip() + if int(output) == 0: + return False + except: + pass + return True if __name__ == "__main__": logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') @@ -119,12 +136,12 @@ if __name__ == "__main__": logging.info("All processes finished") if args.hung_check: - prepare_for_hung_check() + have_long_running_queries = prepare_for_hung_check() logging.info("Checking if some queries hung") cmd = "{} {} {}".format(args.test_cmd, "--hung-check", "00001_select_1") res = call(cmd, shell=True, stderr=STDOUT) hung_check_status = "No queries hung\tOK\n" - if res != 0: + if res != 0 and have_long_running_queries: logging.info("Hung check failed with exit code {}".format(res)) hung_check_status = "Hung check failed\tFAIL\n" open(os.path.join(args.output_folder, "test_results.tsv"), 'w+').write(hung_check_status) diff --git a/docker/test/testflows/runner/Dockerfile b/docker/test/testflows/runner/Dockerfile index bd7eee4c166..9fa028fedca 100644 --- a/docker/test/testflows/runner/Dockerfile +++ b/docker/test/testflows/runner/Dockerfile @@ -35,10 +35,10 @@ RUN apt-get update \ ENV TZ=Europe/Moscow RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone -RUN pip3 install urllib3 testflows==1.6.74 docker-compose docker dicttoxml kazoo tzlocal +RUN pip3 install urllib3 testflows==1.6.90 docker-compose==1.29.1 docker==5.0.0 dicttoxml kazoo tzlocal python-dateutil numpy ENV DOCKER_CHANNEL stable -ENV DOCKER_VERSION 17.09.1-ce +ENV DOCKER_VERSION 20.10.6 RUN set -eux; \ \ @@ -73,5 +73,4 @@ RUN set -x \ VOLUME /var/lib/docker EXPOSE 2375 ENTRYPOINT ["dockerd-entrypoint.sh"] -CMD ["sh", "-c", "python3 regression.py --no-color -o classic --local --clickhouse-binary-path ${CLICKHOUSE_TESTS_SERVER_BIN_PATH} --log test.log ${TESTFLOWS_OPTS}; cat test.log | tfs report results --format json > results.json; /usr/local/bin/process_testflows_result.py || echo -e 'failure\tCannot parse results' > check_status.tsv"] - +CMD ["sh", "-c", "python3 regression.py --no-color -o new-fails --local --clickhouse-binary-path ${CLICKHOUSE_TESTS_SERVER_BIN_PATH} --log test.log ${TESTFLOWS_OPTS}; cat test.log | tfs report results --format json > results.json; /usr/local/bin/process_testflows_result.py || echo -e 'failure\tCannot parse results' > check_status.tsv; find * -type f | grep _instances | grep clickhouse-server | xargs -n1 tar -rvf clickhouse_logs.tar; gzip -9 clickhouse_logs.tar"] diff --git a/docs/README.md b/docs/README.md index 8b3066501bf..a4df023a6ad 100644 --- a/docs/README.md +++ b/docs/README.md @@ -126,7 +126,13 @@ Contribute all new information in English language. 
Other languages are translat ### Adding a New File -When adding a new file: +When you add a new file, it should end with a link like: + +`[Original article](https://clickhouse.tech/docs/) ` + +and there should be **a new empty line** after it. + +{## When adding a new file: - Make symbolic links for all other languages. You can use the following commands: @@ -134,7 +140,7 @@ When adding a new file: $ cd /ClickHouse/clone/directory/docs $ ln -sr en/new/file.md lang/new/file.md ``` - +##} ### Adding a New Language @@ -195,8 +201,11 @@ Templates: - [Function](_description_templates/template-function.md) - [Setting](_description_templates/template-setting.md) +- [Server Setting](_description_templates/template-server-setting.md) - [Database or Table engine](_description_templates/template-engine.md) - [System table](_description_templates/template-system-table.md) +- [Data type](_description_templates/data-type.md) +- [Statement](_description_templates/statement.md) diff --git a/docs/en/commercial/cloud.md b/docs/en/commercial/cloud.md index 91d2061c0af..a30d17828ab 100644 --- a/docs/en/commercial/cloud.md +++ b/docs/en/commercial/cloud.md @@ -31,8 +31,9 @@ toc_title: Cloud ## Alibaba Cloud {#alibaba-cloud} -Alibaba Cloud Managed Service for ClickHouse [China Site](https://www.aliyun.com/product/clickhouse) (Will be available at international site at May, 2021) provides the following key features: -- Highly reliable cloud disk storage engine based on Alibaba Cloud Apsara distributed system +[Alibaba Cloud Managed Service for ClickHouse](https://www.alibabacloud.com/product/clickhouse) provides the following key features: + +- Highly reliable cloud disk storage engine based on [Alibaba Cloud Apsara](https://www.alibabacloud.com/product/apsara-stack) distributed system - Expand capacity on demand without manual data migration - Support single-node, single-replica, multi-node, and multi-replica architectures, and support hot and cold data tiering - Support access allow-list, one-key recovery, multi-layer network security protection, cloud disk encryption @@ -40,6 +41,14 @@ Alibaba Cloud Managed Service for ClickHouse [China Site](https://www.aliyun.com - Built-in monitoring and database management platform - Professional database expert technical support and service +## SberCloud {#sbercloud} + +[SberCloud.Advanced](https://sbercloud.ru/en/advanced) provides [MapReduce Service (MRS)](https://docs.sbercloud.ru/mrs/ug/topics/ug__clickhouse.html), a reliable, secure, and easy-to-use enterprise-level platform for storing, processing, and analyzing big data. MRS allows you to quickly create and manage ClickHouse clusters. + +- A ClickHouse instance consists of three ZooKeeper nodes and multiple ClickHouse nodes. The Dedicated Replica mode is used to ensure high reliability of dual data copies. +- MRS provides smooth and elastic scaling capabilities to quickly meet service growth requirements in scenarios where the cluster storage capacity or CPU computing resources are not enough. When you expand the capacity of ClickHouse nodes in a cluster, MRS provides a one-click data balancing tool and gives you the initiative to balance data. You can determine the data balancing mode and time based on service characteristics to ensure service availability, implementing smooth scaling. 
+- MRS uses an Elastic Load Balance (ELB)-based high-availability deployment architecture to automatically distribute user access traffic to multiple backend nodes, expanding service capabilities to external systems and improving fault tolerance. With the ELB polling mechanism, data is written to local tables and read from distributed tables on different nodes. In this way, data read/write load and high availability of application access are guaranteed.
+
 ## Tencent Cloud {#tencent-cloud}

 [Tencent Managed Service for ClickHouse](https://cloud.tencent.com/product/cdwch) provides the following key features:
diff --git a/docs/en/commercial/index.md b/docs/en/commercial/index.md
index 0f69df62c7b..90e74d88ea8 100644
--- a/docs/en/commercial/index.md
+++ b/docs/en/commercial/index.md
@@ -14,4 +14,4 @@ Service categories:
 - [Support](../commercial/support.md)

 !!! note "For service providers"
-    If you happen to represent one of them, feel free to open a pull request adding your company to the respective section (or even adding a new section if the service doesn’t fit into existing categories). The easiest way to open a pull-request for documentation page is by using a “pencil” edit button in the top-right corner. If your service available in some local market, make sure to mention it in a localized documentation page as well (or at least point it out in a pull-request description).
+    If you happen to represent one of them, feel free to open a pull request adding your company to the respective section (or even adding a new section if the service does not fit into existing categories). The easiest way to open a pull-request for a documentation page is by using the “pencil” edit button in the top-right corner. If your service is available in some local market, make sure to mention it in a localized documentation page as well (or at least point it out in a pull-request description).
diff --git a/docs/en/development/adding_test_queries.md b/docs/en/development/adding_test_queries.md
index 4770d48ebd4..95dfd076a12 100644
--- a/docs/en/development/adding_test_queries.md
+++ b/docs/en/development/adding_test_queries.md
@@ -1,6 +1,6 @@
 # How to add test queries to ClickHouse CI

-ClickHouse has hundreds (or even thousands) of features. Every commit get checked by a complex set of tests containing many thousands of test cases.
+ClickHouse has hundreds (or even thousands) of features. Every commit gets checked by a complex set of tests containing many thousands of test cases.

 The core functionality is very well tested, but some corner-cases and different combinations of features can be uncovered with ClickHouse CI.

@@ -105,13 +105,13 @@ clickhouse-client -nmT < tests/queries/0_stateless/01521_dummy_test.sql | tee te
 5) ensure everything is correct, if the test output is incorrect (due to some bug for example), adjust the reference file using text editor.
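The workflow described in the steps above boils down to a test file plus a generated reference. A minimal end-to-end sketch, assuming a running local clickhouse-server and the repository layout used in this document (the test body itself is just an example):

```bash
# Create a trivial stateless test and generate its .reference file.
t=tests/queries/0_stateless/01521_dummy_test
cat > "$t.sql" <<'SQL'
DROP TABLE IF EXISTS dummy;
CREATE TABLE dummy (n UInt64) ENGINE = Memory;
INSERT INTO dummy VALUES (1), (2), (3);
SELECT sum(n) FROM dummy;
DROP TABLE dummy;
SQL
# The output becomes the expected result, so inspect it by hand (step 5 above)
# before committing both files.
clickhouse-client -nmT < "$t.sql" | tee "$t.reference"
```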
-#### How create good test +#### How to create good test - test should be - minimal - create only tables related to tested functionality, remove unrelated columns and parts of query - fast - should not take longer than few seconds (better subseconds) - correct - fails then feature is not working - - deteministic + - deterministic - isolated / stateless - don't rely on some environment things - don't rely on timing when possible @@ -120,11 +120,11 @@ clickhouse-client -nmT < tests/queries/0_stateless/01521_dummy_test.sql | tee te - don't switch databases (unless necessary) - you can create several table replicas on the same node if needed - you can use one of the test cluster definitions when needed (see system.clusters) -- use `number` / `numbers_mt` / `zeros` / `zeros_mt` and similar for queries / to initialize data when appliable +- use `number` / `numbers_mt` / `zeros` / `zeros_mt` and similar for queries / to initialize data when applicable - clean up the created objects after test and before the test (DROP IF EXISTS) - in case of some dirty state - prefer sync mode of operations (mutations, merges, etc.) - use other SQL files in the `0_stateless` folder as an example -- ensure the feature / feature combination you want to tests is not covered yet with existsing tests +- ensure the feature / feature combination you want to test is not yet covered with existing tests #### Commit / push / create PR. diff --git a/docs/en/development/architecture.md b/docs/en/development/architecture.md index 4ef01f4e4fb..424052001fd 100644 --- a/docs/en/development/architecture.md +++ b/docs/en/development/architecture.md @@ -21,11 +21,11 @@ Various `IColumn` implementations (`ColumnUInt8`, `ColumnString`, and so on) are Nevertheless, it is possible to work with individual values as well. To represent an individual value, the `Field` is used. `Field` is just a discriminated union of `UInt64`, `Int64`, `Float64`, `String` and `Array`. `IColumn` has the `operator []` method to get the n-th value as a `Field`, and the `insert` method to append a `Field` to the end of a column. These methods are not very efficient, because they require dealing with temporary `Field` objects representing an individual value. There are more efficient methods, such as `insertFrom`, `insertRangeFrom`, and so on. -`Field` doesn’t have enough information about a specific data type for a table. For example, `UInt8`, `UInt16`, `UInt32`, and `UInt64` are all represented as `UInt64` in a `Field`. +`Field` does not have enough information about a specific data type for a table. For example, `UInt8`, `UInt16`, `UInt32`, and `UInt64` are all represented as `UInt64` in a `Field`. ## Leaky Abstractions {#leaky-abstractions} -`IColumn` has methods for common relational transformations of data, but they don’t meet all needs. For example, `ColumnUInt64` doesn’t have a method to calculate the sum of two columns, and `ColumnString` doesn’t have a method to run a substring search. These countless routines are implemented outside of `IColumn`. +`IColumn` has methods for common relational transformations of data, but they do not meet all needs. For example, `ColumnUInt64` does not have a method to calculate the sum of two columns, and `ColumnString` does not have a method to run a substring search. These countless routines are implemented outside of `IColumn`. 
Various functions on columns can be implemented in a generic, non-efficient way using `IColumn` methods to extract `Field` values, or in a specialized way using knowledge of inner memory layout of data in a specific `IColumn` implementation. It is implemented by casting functions to a specific `IColumn` type and deal with internal representation directly. For example, `ColumnUInt64` has the `getData` method that returns a reference to an internal array, then a separate routine reads or fills that array directly. We have “leaky abstractions” to allow efficient specializations of various routines. @@ -35,7 +35,7 @@ Various functions on columns can be implemented in a generic, non-efficient way `IDataType` and `IColumn` are only loosely related to each other. Different data types can be represented in memory by the same `IColumn` implementations. For example, `DataTypeUInt32` and `DataTypeDateTime` are both represented by `ColumnUInt32` or `ColumnConstUInt32`. In addition, the same data type can be represented by different `IColumn` implementations. For example, `DataTypeUInt8` can be represented by `ColumnUInt8` or `ColumnConstUInt8`. -`IDataType` only stores metadata. For instance, `DataTypeUInt8` doesn’t store anything at all (except virtual pointer `vptr`) and `DataTypeFixedString` stores just `N` (the size of fixed-size strings). +`IDataType` only stores metadata. For instance, `DataTypeUInt8` does not store anything at all (except virtual pointer `vptr`) and `DataTypeFixedString` stores just `N` (the size of fixed-size strings). `IDataType` has helper methods for various data formats. Examples are methods to serialize a value with possible quoting, to serialize a value for JSON, and to serialize a value as part of the XML format. There is no direct correspondence to data formats. For example, the different data formats `Pretty` and `TabSeparated` can use the same `serializeTextEscaped` helper method from the `IDataType` interface. @@ -43,7 +43,7 @@ Various functions on columns can be implemented in a generic, non-efficient way A `Block` is a container that represents a subset (chunk) of a table in memory. It is just a set of triples: `(IColumn, IDataType, column name)`. During query execution, data is processed by `Block`s. If we have a `Block`, we have data (in the `IColumn` object), we have information about its type (in `IDataType`) that tells us how to deal with that column, and we have the column name. It could be either the original column name from the table or some artificial name assigned for getting temporary results of calculations. -When we calculate some function over columns in a block, we add another column with its result to the block, and we don’t touch columns for arguments of the function because operations are immutable. Later, unneeded columns can be removed from the block, but not modified. It is convenient for the elimination of common subexpressions. +When we calculate some function over columns in a block, we add another column with its result to the block, and we do not touch columns for arguments of the function because operations are immutable. Later, unneeded columns can be removed from the block, but not modified. It is convenient for the elimination of common subexpressions. Blocks are created for every processed chunk of data. Note that for the same type of calculation, the column names and types remain the same for different blocks, and only column data changes. 
It is better to split block data from the block header because small block sizes have a high overhead of temporary strings for copying shared_ptrs and column names. @@ -118,11 +118,11 @@ Interpreters are responsible for creating the query execution pipeline from an ` There are ordinary functions and aggregate functions. For aggregate functions, see the next section. -Ordinary functions don’t change the number of rows – they work as if they are processing each row independently. In fact, functions are not called for individual rows, but for `Block`’s of data to implement vectorized query execution. +Ordinary functions do not change the number of rows – they work as if they are processing each row independently. In fact, functions are not called for individual rows, but for `Block`’s of data to implement vectorized query execution. There are some miscellaneous functions, like [blockSize](../sql-reference/functions/other-functions.md#function-blocksize), [rowNumberInBlock](../sql-reference/functions/other-functions.md#function-rownumberinblock), and [runningAccumulate](../sql-reference/functions/other-functions.md#runningaccumulate), that exploit block processing and violate the independence of rows. -ClickHouse has strong typing, so there’s no implicit type conversion. If a function doesn’t support a specific combination of types, it throws an exception. But functions can work (be overloaded) for many different combinations of types. For example, the `plus` function (to implement the `+` operator) works for any combination of numeric types: `UInt8` + `Float32`, `UInt16` + `Int8`, and so on. Also, some variadic functions can accept any number of arguments, such as the `concat` function. +ClickHouse has strong typing, so there’s no implicit type conversion. If a function does not support a specific combination of types, it throws an exception. But functions can work (be overloaded) for many different combinations of types. For example, the `plus` function (to implement the `+` operator) works for any combination of numeric types: `UInt8` + `Float32`, `UInt16` + `Int8`, and so on. Also, some variadic functions can accept any number of arguments, such as the `concat` function. Implementing a function may be slightly inconvenient because a function explicitly dispatches supported data types and supported `IColumns`. For example, the `plus` function has code generated by instantiation of a C++ template for each combination of numeric types, and constant or non-constant left and right arguments. @@ -152,7 +152,7 @@ Internally, it is just a primitive multithreaded server without coroutines or fi The server initializes the `Context` class with the necessary environment for query execution: the list of available databases, users and access rights, settings, clusters, the process list, the query log, and so on. Interpreters use this environment. -We maintain full backward and forward compatibility for the server TCP protocol: old clients can talk to new servers, and new clients can talk to old servers. But we don’t want to maintain it eternally, and we are removing support for old versions after about one year. +We maintain full backward and forward compatibility for the server TCP protocol: old clients can talk to new servers, and new clients can talk to old servers. But we do not want to maintain it eternally, and we are removing support for old versions after about one year. !!! note "Note" For most external applications, we recommend using the HTTP interface because it is simple and easy to use. 
The TCP protocol is more tightly linked to internal data structures: it uses an internal format for passing blocks of data, and it uses custom framing for compressed data. We haven’t released a C library for that protocol because it requires linking most of the ClickHouse codebase, which is not practical. @@ -169,13 +169,13 @@ There is no global query plan for distributed query execution. Each node has its `MergeTree` is a family of storage engines that supports indexing by primary key. The primary key can be an arbitrary tuple of columns or expressions. Data in a `MergeTree` table is stored in “parts”. Each part stores data in the primary key order, so data is ordered lexicographically by the primary key tuple. All the table columns are stored in separate `column.bin` files in these parts. The files consist of compressed blocks. Each block is usually from 64 KB to 1 MB of uncompressed data, depending on the average value size. The blocks consist of column values placed contiguously one after the other. Column values are in the same order for each column (the primary key defines the order), so when you iterate by many columns, you get values for the corresponding rows. -The primary key itself is “sparse”. It doesn’t address every single row, but only some ranges of data. A separate `primary.idx` file has the value of the primary key for each N-th row, where N is called `index_granularity` (usually, N = 8192). Also, for each column, we have `column.mrk` files with “marks,” which are offsets to each N-th row in the data file. Each mark is a pair: the offset in the file to the beginning of the compressed block, and the offset in the decompressed block to the beginning of data. Usually, compressed blocks are aligned by marks, and the offset in the decompressed block is zero. Data for `primary.idx` always resides in memory, and data for `column.mrk` files is cached. +The primary key itself is “sparse”. It does not address every single row, but only some ranges of data. A separate `primary.idx` file has the value of the primary key for each N-th row, where N is called `index_granularity` (usually, N = 8192). Also, for each column, we have `column.mrk` files with “marks,” which are offsets to each N-th row in the data file. Each mark is a pair: the offset in the file to the beginning of the compressed block, and the offset in the decompressed block to the beginning of data. Usually, compressed blocks are aligned by marks, and the offset in the decompressed block is zero. Data for `primary.idx` always resides in memory, and data for `column.mrk` files is cached. When we are going to read something from a part in `MergeTree`, we look at `primary.idx` data and locate ranges that could contain requested data, then look at `column.mrk` data and calculate offsets for where to start reading those ranges. Because of sparseness, excess data may be read. ClickHouse is not suitable for a high load of simple point queries, because the entire range with `index_granularity` rows must be read for each key, and the entire compressed block must be decompressed for each column. We made the index sparse because we must be able to maintain trillions of rows per single server without noticeable memory consumption for the index. Also, because the primary key is sparse, it is not unique: it cannot check the existence of the key in the table at INSERT time. You could have many rows with the same key in a table. 
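The sparseness is easy to observe on a running server: each part carries roughly `rows / index_granularity` marks. A small sketch, assuming a local test server (the table and the data are just examples):

```bash
clickhouse-client -nm <<'SQL'
CREATE TABLE sparse_demo (k UInt64, v String)
    ENGINE = MergeTree ORDER BY k
    SETTINGS index_granularity = 8192;
INSERT INTO sparse_demo SELECT number, toString(number) FROM numbers(1000000);
-- Expect on the order of 1000000 / 8192 marks for the part:
-- primary.idx keeps only every 8192-th key, not every row.
SELECT name, rows, marks FROM system.parts WHERE table = 'sparse_demo' AND active;
DROP TABLE sparse_demo;
SQL
```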
When you `INSERT` a bunch of data into `MergeTree`, that bunch is sorted by primary key order and forms a new part. There are background threads that periodically select some parts and merge them into a single sorted part to keep the number of parts relatively low. That’s why it is called `MergeTree`. Of course, merging leads to “write amplification”. All parts are immutable: they are only created and deleted, but not modified. When SELECT is executed, it holds a snapshot of the table (a set of parts). After merging, we also keep old parts for some time to make a recovery after failure easier, so if we see that some merged part is probably broken, we can replace it with its source parts. -`MergeTree` is not an LSM tree because it doesn’t contain “memtable” and “log”: inserted data is written directly to the filesystem. This makes it suitable only to INSERT data in batches, not by individual row and not very frequently – about once per second is ok, but a thousand times a second is not. We did it this way for simplicity’s sake, and because we are already inserting data in batches in our applications. +`MergeTree` is not an LSM tree because it does not contain “memtable” and “log”: inserted data is written directly to the filesystem. This makes it suitable only to INSERT data in batches, not by individual row and not very frequently – about once per second is ok, but a thousand times a second is not. We did it this way for simplicity’s sake, and because we are already inserting data in batches in our applications. There are MergeTree engines that are doing additional work during background merges. Examples are `CollapsingMergeTree` and `AggregatingMergeTree`. This could be treated as special support for updates. Keep in mind that these are not real updates because users usually have no control over the time when background merges are executed, and data in a `MergeTree` table is almost always stored in more than one part, not in completely merged form. @@ -185,7 +185,7 @@ Replication in ClickHouse can be configured on a per-table basis. You could have Replication is implemented in the `ReplicatedMergeTree` storage engine. The path in `ZooKeeper` is specified as a parameter for the storage engine. All tables with the same path in `ZooKeeper` become replicas of each other: they synchronize their data and maintain consistency. Replicas can be added and removed dynamically simply by creating or dropping a table. -Replication uses an asynchronous multi-master scheme. You can insert data into any replica that has a session with `ZooKeeper`, and data is replicated to all other replicas asynchronously. Because ClickHouse doesn’t support UPDATEs, replication is conflict-free. As there is no quorum acknowledgment of inserts, just-inserted data might be lost if one node fails. +Replication uses an asynchronous multi-master scheme. You can insert data into any replica that has a session with `ZooKeeper`, and data is replicated to all other replicas asynchronously. Because ClickHouse does not support UPDATEs, replication is conflict-free. As there is no quorum acknowledgment of inserts, just-inserted data might be lost if one node fails. Metadata for replication is stored in ZooKeeper. There is a replication log that lists what actions to do. Actions are: get part; merge parts; drop a partition, and so on. Each replica copies the replication log to its queue and then executes the actions from the queue. 
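As a hedged illustration of the replication mechanics described above (the ZooKeeper path layout and the `{shard}`/`{replica}` macros follow common conventions and are assumptions of this example, not requirements stated in the text):

``` sql
-- Executing the same statement on several servers makes them replicas of one
-- table: the first argument is the shared ZooKeeper path, the second is the
-- replica name, which must be unique per replica.
CREATE TABLE events
(
    EventDate Date,
    EventID UInt64
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/events', '{replica}')
ORDER BY (EventDate, EventID);
```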
For example, on insertion, the “get the part” action is created in the log, and every replica downloads that part. Merges are coordinated between replicas to get byte-identical results. All parts are merged in the same way on all replicas. One of the leaders initiates a new merge first and writes “merge parts” actions to the log. Multiple replicas (or all) can be leaders at the same time. A replica can be prevented from becoming a leader using the `merge_tree` setting `replicated_can_become_leader`. The leaders are responsible for scheduling background merges. diff --git a/docs/en/development/build-osx.md b/docs/en/development/build-osx.md index e0b1be710f1..a862bdeb299 100644 --- a/docs/en/development/build-osx.md +++ b/docs/en/development/build-osx.md @@ -5,44 +5,80 @@ toc_title: Build on Mac OS X # How to Build ClickHouse on Mac OS X {#how-to-build-clickhouse-on-mac-os-x} -Build should work on Mac OS X 10.15 (Catalina). +The build should work on x86_64 (Intel) and arm64 (Apple Silicon) macOS 10.15 (Catalina) and higher, using the native AppleClang of a recent Xcode, or Homebrew's vanilla Clang or GCC compilers. ## Install Homebrew {#install-homebrew} ``` bash -$ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" +# ...and follow the printed instructions on any additional steps required to complete the installation. ``` +## Install Xcode and Command Line Tools {#install-xcode-and-command-line-tools} + +Install the latest [Xcode](https://apps.apple.com/am/app/xcode/id497799835?mt=12) from the App Store. + +Open it at least once to accept the end-user license agreement and automatically install the required components. + +Then, make sure that the latest Command Line Tools are installed and selected in the system: + +``` bash +sudo rm -rf /Library/Developer/CommandLineTools +sudo xcode-select --install +``` + +Reboot. + ## Install Required Compilers, Tools, and Libraries {#install-required-compilers-tools-and-libraries} ``` bash -$ brew install cmake ninja libtool gettext llvm +brew update +brew install cmake ninja libtool gettext llvm gcc ``` ## Checkout ClickHouse Sources {#checkout-clickhouse-sources} ``` bash -$ git clone --recursive git@github.com:ClickHouse/ClickHouse.git -``` - -or - -``` bash -$ git clone --recursive https://github.com/ClickHouse/ClickHouse.git - -$ cd ClickHouse +git clone --recursive git@github.com:ClickHouse/ClickHouse.git +# ...alternatively, you can use https://github.com/ClickHouse/ClickHouse.git as the repo URL. ``` ## Build ClickHouse {#build-clickhouse} -> Please note: ClickHouse doesn't support build with native Apple Clang compiler, we need use clang from LLVM. +To build using Xcode's native AppleClang compiler: ``` bash -$ mkdir build -$ cd build -$ cmake .. -DCMAKE_C_COMPILER=`brew --prefix llvm`/bin/clang -DCMAKE_CXX_COMPILER=`brew --prefix llvm`/bin/clang++ -DCMAKE_PREFIX_PATH=`brew --prefix llvm` -$ ninja -$ cd .. +cd ClickHouse +rm -rf build +mkdir build +cd build +cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo .. +cmake --build . --config RelWithDebInfo +cd .. +``` + +To build using Homebrew's vanilla Clang compiler: + +``` bash +cd ClickHouse +rm -rf build +mkdir build +cd build +cmake -DCMAKE_C_COMPILER=$(brew --prefix llvm)/bin/clang -DCMAKE_CXX_COMPILER=$(brew --prefix llvm)/bin/clang++ -DCMAKE_BUILD_TYPE=RelWithDebInfo .. +cmake --build . --config RelWithDebInfo +cd ..
+``` + +To build using Homebrew's vanilla GCC compiler: + +``` bash +cd ClickHouse +rm -rf build +mkdir build +cd build +cmake -DCMAKE_C_COMPILER=$(brew --prefix gcc)/bin/gcc-10 -DCMAKE_CXX_COMPILER=$(brew --prefix gcc)/bin/g++-10 -DCMAKE_BUILD_TYPE=RelWithDebInfo .. +cmake --build . --config RelWithDebInfo +cd .. ``` ## Caveats {#caveats} @@ -81,11 +117,18 @@ To do so, create the `/Library/LaunchDaemons/limit.maxfiles.plist` file with the Execute the following command: ``` bash -$ sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist +sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist ``` Reboot. To check if it’s working, you can use `ulimit -n` command. +## Run ClickHouse Server {#run-clickhouse-server} + +``` bash +cd ClickHouse +./build/programs/clickhouse-server --config-file ./programs/server/config.xml +``` + [Original article](https://clickhouse.tech/docs/en/development/build_osx/) diff --git a/docs/en/development/build.md b/docs/en/development/build.md index 3181f26800d..8ef12221e8d 100644 --- a/docs/en/development/build.md +++ b/docs/en/development/build.md @@ -27,53 +27,20 @@ Or cmake3 instead of cmake on older systems. On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/)) -```bash +```bash sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" ``` For other Linux distribution - check the availability of the [prebuild packages](https://releases.llvm.org/download.html) or build clang [from sources](https://clang.llvm.org/get_started.html). -#### Use clang-11 for Builds {#use-gcc-10-for-builds} +#### Use clang-11 for Builds {#use-clang-11-for-builds} ``` bash $ export CC=clang-11 $ export CXX=clang++-11 ``` -### Install GCC 10 {#install-gcc-10} - -We recommend building ClickHouse with clang-11, GCC-10 also supported, but it is not used for production builds. - -If you want to use GCC-10 there are several ways to install it. - -#### Install from Repository {#install-from-repository} - -On Ubuntu 19.10 or newer: - - $ sudo apt-get update - $ sudo apt-get install gcc-10 g++-10 - -#### Install from a PPA Package {#install-from-a-ppa-package} - -On older Ubuntu: - -``` bash -$ sudo apt-get install software-properties-common -$ sudo apt-add-repository ppa:ubuntu-toolchain-r/test -$ sudo apt-get update -$ sudo apt-get install gcc-10 g++-10 -``` - -#### Install from Sources {#install-from-sources} - -See [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh) - -#### Use GCC 10 for Builds {#use-gcc-10-for-builds} - -``` bash -$ export CC=gcc-10 -$ export CXX=g++-10 -``` +GCC can also be used, though it is discouraged. ### Checkout ClickHouse Sources {#checkout-clickhouse-sources} @@ -106,9 +73,9 @@ The build requires the following components: - Git (is used only to checkout the sources, it’s not needed for the build) - CMake 3.10 or newer -- Ninja (recommended) or Make -- C++ compiler: gcc 10 or clang 8 or newer -- Linker: lld or gold (the classic GNU ld won’t work) +- Ninja +- C++ compiler: clang-11 or newer +- Linker: lld - Python (is only used inside LLVM build and it is optional) If all the components are installed, you may build in the same way as the steps above.
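Before configuring the build, it can save time to verify that the listed components are actually available. This is an optional sketch; the versioned binary names assume the apt.llvm.org packages on Ubuntu and may differ on other distributions:

``` bash
cmake --version     # expect 3.10 or newer
ninja --version
clang-11 --version  # C++ compiler, clang-11 or newer
ld.lld-11 --version # the lld linker
python3 --version   # optional, only used inside the LLVM build
```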
@@ -116,7 +83,7 @@ If all the components are installed, you may build in the same way as the steps Example for Ubuntu Eoan: ``` bash sudo apt update -sudo apt install git cmake ninja-build g++ python +sudo apt install git cmake ninja-build clang++ python git clone --recursive https://github.com/ClickHouse/ClickHouse.git mkdir build && cd build cmake ../ClickHouse @@ -125,7 +92,7 @@ ninja Example for OpenSUSE Tumbleweed: ``` bash -sudo zypper install git cmake ninja gcc-c++ python lld +sudo zypper install git cmake ninja clang python lld git clone --recursive https://github.com/ClickHouse/ClickHouse.git mkdir build && cd build cmake ../ClickHouse @@ -135,7 +102,7 @@ ninja Example for Fedora Rawhide: ``` bash sudo yum update -yum --nogpg install git cmake make gcc-c++ python3 +yum --nogpg install git cmake make clang python3 git clone --recursive https://github.com/ClickHouse/ClickHouse.git mkdir build && cd build cmake ../ClickHouse @@ -145,11 +112,11 @@ make -j $(nproc) ## How to Build ClickHouse Debian Package {#how-to-build-clickhouse-debian-package} -### Install Git and Pbuilder {#install-git-and-pbuilder} +### Install Git {#install-git} ``` bash $ sudo apt-get update -$ sudo apt-get install git python pbuilder debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring +$ sudo apt-get install git python debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring ``` ### Checkout ClickHouse Sources {#checkout-clickhouse-sources-1} @@ -167,7 +134,7 @@ $ ./release ## Faster builds for development -Normally all tools of the ClickHouse bundle, such as `clickhouse-server`, `clickhouse-client` etc., are linked into a single static executable, `clickhouse`. This executable must be re-linked on every change, which might be slow. Two common ways to improve linking time are to use `lld` linker, and use the 'split' build configuration, which builds a separate binary for every tool, and further splits the code into serveral shared libraries. To enable these tweaks, pass the following flags to `cmake`: +Normally all tools of the ClickHouse bundle, such as `clickhouse-server`, `clickhouse-client` etc., are linked into a single static executable, `clickhouse`. This executable must be re-linked on every change, which might be slow. Two common ways to improve linking time are to use the `lld` linker, and to use the 'split' build configuration, which builds a separate binary for every tool and further splits the code into several shared libraries. To enable these tweaks, pass the following flags to `cmake`: ``` -DCMAKE_C_FLAGS="--ld-path=lld" -DCMAKE_CXX_FLAGS="--ld-path=lld" -DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1 -DCLICKHOUSE_SPLIT_BINARY=1 diff --git a/docs/en/development/cmake-in-clickhouse.md b/docs/en/development/cmake-in-clickhouse.md deleted file mode 100644 index 6e6ac825587..00000000000 --- a/docs/en/development/cmake-in-clickhouse.md +++ /dev/null @@ -1,284 +0,0 @@ -# CMake in ClickHouse - -## TL; DR How to make ClickHouse compile and link faster? - -Developer only! This command will likely fulfill most of your needs. Run before calling `ninja`. - -```cmake -cmake ..
\ - -DCMAKE_C_COMPILER=/bin/clang-10 \ - -DCMAKE_CXX_COMPILER=/bin/clang++-10 \ - -DCMAKE_BUILD_TYPE=Debug \ - -DENABLE_CLICKHOUSE_ALL=OFF \ - -DENABLE_CLICKHOUSE_SERVER=ON \ - -DENABLE_CLICKHOUSE_CLIENT=ON \ - -DUSE_STATIC_LIBRARIES=OFF \ - -DSPLIT_SHARED_LIBRARIES=ON \ - -DENABLE_LIBRARIES=OFF \ - -DUSE_UNWIND=ON \ - -DENABLE_UTILS=OFF \ - -DENABLE_TESTS=OFF -``` - -## CMake files types - -1. ClickHouse's source CMake files (located in the root directory and in `/src`). -2. Arch-dependent CMake files (located in `/cmake/*os_name*`). -3. Libraries finders (search for contrib libraries, located in `/cmake/find`). -3. Contrib build CMake files (used instead of libraries' own CMake files, located in `/cmake/modules`) - -## List of CMake flags - -* This list is auto-generated by [this Python script](https://github.com/clickhouse/clickhouse/blob/master/docs/tools/cmake_in_clickhouse_generator.py). -* The flag name is a link to its position in the code. -* If an option's default value is itself an option, it's also a link to its position in this list. -### ClickHouse modes - -| Name | Default value | Description | Comment | -|------|---------------|-------------|---------| -| [`ENABLE_CLICKHOUSE_ALL`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L8) | `ON` | Enable all ClickHouse modes by default | The `clickhouse` binary is a multi purpose tool that contains multiple execution modes (client, server, etc.), each of them may be built and linked as a separate library. If you do not know what modes you need, turn this option OFF and enable SERVER and CLIENT only. | -| [`ENABLE_CLICKHOUSE_BENCHMARK`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L18) | `ENABLE_CLICKHOUSE_ALL` | Queries benchmarking mode | https://clickhouse.tech/docs/en/operations/utilities/clickhouse-benchmark/ | -| [`ENABLE_CLICKHOUSE_CLIENT`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L11) | `ENABLE_CLICKHOUSE_ALL` | Client mode (interactive tui/shell that connects to the server) | | -| [`ENABLE_CLICKHOUSE_COMPRESSOR`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L23) | `ENABLE_CLICKHOUSE_ALL` | Data compressor and decompressor | https://clickhouse.tech/docs/en/operations/utilities/clickhouse-compressor/ | -| [`ENABLE_CLICKHOUSE_COPIER`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L26) | `ENABLE_CLICKHOUSE_ALL` | Inter-cluster data copying mode | https://clickhouse.tech/docs/en/operations/utilities/clickhouse-copier/ | -| [`ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L20) | `ENABLE_CLICKHOUSE_ALL` | Configs processor (extract values etc.) 
| | -| [`ENABLE_CLICKHOUSE_FORMAT`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L28) | `ENABLE_CLICKHOUSE_ALL` | Queries pretty-printer and formatter with syntax highlighting | | -| [`ENABLE_CLICKHOUSE_GIT_IMPORT`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L40) | `ENABLE_CLICKHOUSE_ALL` | A tool to analyze Git repositories | https://presentations.clickhouse.tech/matemarketing_2020/ | -| [`ENABLE_CLICKHOUSE_INSTALL`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L44) | `OFF` | Install ClickHouse without .deb/.rpm/.tgz packages (having the binary only) | | -| [`ENABLE_CLICKHOUSE_LOCAL`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L15) | `ENABLE_CLICKHOUSE_ALL` | Local files fast processing mode | https://clickhouse.tech/docs/en/operations/utilities/clickhouse-local/ | -| [`ENABLE_CLICKHOUSE_OBFUSCATOR`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L32) | `ENABLE_CLICKHOUSE_ALL` | Table data obfuscator (convert real data to benchmark-ready one) | https://clickhouse.tech/docs/en/operations/utilities/clickhouse-obfuscator/ | -| [`ENABLE_CLICKHOUSE_ODBC_BRIDGE`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L36) | `ENABLE_CLICKHOUSE_ALL` | HTTP-server working like a proxy to ODBC driver | https://clickhouse.tech/docs/en/operations/utilities/odbc-bridge/ | -| [`ENABLE_CLICKHOUSE_SERVER`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L10) | `ENABLE_CLICKHOUSE_ALL` | Server mode (main mode) | | - -### External libraries -Note that ClickHouse uses forks of these libraries, see https://github.com/ClickHouse-Extras. - -| Name | Default value | Description | Comment | -|------|---------------|-------------|---------| -| [`ENABLE_AMQPCPP`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/amqpcpp.cmake#L1) | `ENABLE_LIBRARIES` | Enalbe AMQP-CPP | | -| [`ENABLE_AVRO`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/avro.cmake#L2) | `ENABLE_LIBRARIES` | Enable Avro | Needed when using Apache Avro serialization format | -| [`ENABLE_BASE64`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/base64.cmake#L1) | `ENABLE_LIBRARIES` | Enable base64 | | -| [`ENABLE_BROTLI`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/brotli.cmake#L1) | `ENABLE_LIBRARIES` | Enable brotli | | -| [`ENABLE_CAPNP`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/capnp.cmake#L1) | `ENABLE_LIBRARIES` | Enable Cap'n Proto | | -| [`ENABLE_CASSANDRA`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/cassandra.cmake#L1) | `ENABLE_LIBRARIES` | Enable Cassandra | | -| [`ENABLE_CCACHE`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/ccache.cmake#L22) | `ENABLE_CCACHE_BY_DEFAULT` | Speedup re-compilations using ccache (external tool) | https://ccache.dev/ | -| [`ENABLE_CLANG_TIDY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/analysis.cmake#L2) | `OFF` | Use clang-tidy static analyzer | https://clang.llvm.org/extra/clang-tidy/ | -| [`ENABLE_CURL`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/curl.cmake#L1) | `ENABLE_LIBRARIES` | Enable curl | | -| [`ENABLE_EMBEDDED_COMPILER`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/llvm.cmake#L5) | `ENABLE_LIBRARIES` | Set to TRUE to enable support for 'compile_expressions' option for query execution | | -| 
[`ENABLE_FASTOPS`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/fastops.cmake#L2) | `ENABLE_LIBRARIES` | Enable fast vectorized mathematical functions library by Mikhail Parakhin | | -| [`ENABLE_GPERF`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/gperf.cmake#L5) | `ENABLE_LIBRARIES` | Use gperf function hash generator tool | | -| [`ENABLE_GRPC`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/grpc.cmake#L8) | `ENABLE_GRPC_DEFAULT` | Use gRPC | | -| [`ENABLE_GSASL_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/libgsasl.cmake#L1) | `ENABLE_LIBRARIES` | Enable gsasl library | | -| [`ENABLE_H3`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/h3.cmake#L1) | `ENABLE_LIBRARIES` | Enable H3 | | -| [`ENABLE_HDFS`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/hdfs3.cmake#L2) | `ENABLE_LIBRARIES` | Enable HDFS | | -| [`ENABLE_ICU`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/icu.cmake#L2) | `ENABLE_LIBRARIES` | Enable ICU | | -| [`ENABLE_LDAP`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/ldap.cmake#L5) | `ENABLE_LIBRARIES` | Enable LDAP | | -| [`ENABLE_LIBPQXX`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/libpqxx.cmake#L1) | `ENABLE_LIBRARIES` | Enalbe libpqxx | | -| [`ENABLE_MSGPACK`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/msgpack.cmake#L1) | `ENABLE_LIBRARIES` | Enable msgpack library | | -| [`ENABLE_MYSQL`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/mysqlclient.cmake#L2) | `ENABLE_LIBRARIES` | Enable MySQL | | -| [`ENABLE_NURAFT`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/nuraft.cmake#L1) | `ENABLE_LIBRARIES` | Enable NuRaft | | -| [`ENABLE_ODBC`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/odbc.cmake#L1) | `ENABLE_LIBRARIES` | Enable ODBC library | | -| [`ENABLE_ORC`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/orc.cmake#L1) | `ENABLE_LIBRARIES` | Enable ORC | | -| [`ENABLE_PARQUET`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/parquet.cmake#L2) | `ENABLE_LIBRARIES` | Enable parquet | | -| [`ENABLE_PROTOBUF`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/protobuf.cmake#L1) | `ENABLE_LIBRARIES` | Enable protobuf | | -| [`ENABLE_RAPIDJSON`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/rapidjson.cmake#L1) | `ENABLE_LIBRARIES` | Use rapidjson | | -| [`ENABLE_RDKAFKA`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/rdkafka.cmake#L1) | `ENABLE_LIBRARIES` | Enable kafka | | -| [`ENABLE_ROCKSDB`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/rocksdb.cmake#L1) | `ENABLE_LIBRARIES` | Enable ROCKSDB | | -| [`ENABLE_S3`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/s3.cmake#L2) | `ENABLE_LIBRARIES` | Enable S3 | | -| [`ENABLE_SSL`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/ssl.cmake#L3) | `ENABLE_LIBRARIES` | Enable ssl | Needed when securely connecting to an external server, e.g. clickhouse-client --host ... 
--secure | -| [`ENABLE_STATS`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/stats.cmake#L1) | `ENABLE_LIBRARIES` | Enalbe StatsLib library | | - - -### External libraries system/bundled mode - -| Name | Default value | Description | Comment | -|------|---------------|-------------|---------| -| [`USE_INTERNAL_AVRO_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/avro.cmake#L11) | `ON` | Set to FALSE to use system avro library instead of bundled | | -| [`USE_INTERNAL_AWS_S3_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/s3.cmake#L14) | `ON` | Set to FALSE to use system S3 instead of bundled (experimental set to OFF on your own risk) | | -| [`USE_INTERNAL_BROTLI_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/brotli.cmake#L12) | `USE_STATIC_LIBRARIES` | Set to FALSE to use system libbrotli library instead of bundled | Many system ship only dynamic brotly libraries, so we back off to bundled by default | -| [`USE_INTERNAL_CAPNP_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/capnp.cmake#L10) | `NOT_UNBUNDLED` | Set to FALSE to use system capnproto library instead of bundled | | -| [`USE_INTERNAL_CURL`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/curl.cmake#L10) | `NOT_UNBUNDLED` | Use internal curl library | | -| [`USE_INTERNAL_GRPC_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/grpc.cmake#L25) | `NOT_UNBUNDLED` | Set to FALSE to use system gRPC library instead of bundled. (Experimental. Set to OFF on your own risk) | Normally we use the internal gRPC framework. You can set USE_INTERNAL_GRPC_LIBRARY to OFF to force using the external gRPC framework, which should be installed in the system in this case. 
The external gRPC framework can be installed in the system by running sudo apt-get install libgrpc++-dev protobuf-compiler-grpc | -| [`USE_INTERNAL_GTEST_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/gtest.cmake#L3) | `NOT_UNBUNDLED` | Set to FALSE to use system Google Test instead of bundled | | -| [`USE_INTERNAL_H3_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/h3.cmake#L9) | `ON` | Set to FALSE to use system h3 library instead of bundled | | -| [`USE_INTERNAL_HDFS3_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/hdfs3.cmake#L14) | `ON` | Set to FALSE to use system HDFS3 instead of bundled (experimental - set to OFF on your own risk) | | -| [`USE_INTERNAL_ICU_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/icu.cmake#L15) | `NOT_UNBUNDLED` | Set to FALSE to use system ICU library instead of bundled | | -| [`USE_INTERNAL_LDAP_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/ldap.cmake#L14) | `NOT_UNBUNDLED` | Set to FALSE to use system *LDAP library instead of bundled | | -| [`USE_INTERNAL_LIBCXX_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/cxx.cmake#L15) | `USE_INTERNAL_LIBCXX_LIBRARY_DEFAULT` | Disable to use system libcxx and libcxxabi libraries instead of bundled | | -| [`USE_INTERNAL_LIBGSASL_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/libgsasl.cmake#L12) | `USE_STATIC_LIBRARIES` | Set to FALSE to use system libgsasl library instead of bundled | when USE_STATIC_LIBRARIES we usually need to pick up hell a lot of dependencies for libgsasl | -| [`USE_INTERNAL_LIBXML2_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/libxml2.cmake#L1) | `NOT_UNBUNDLED` | Set to FALSE to use system libxml2 library instead of bundled | | -| [`USE_INTERNAL_LLVM_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/llvm.cmake#L8) | `NOT_UNBUNDLED` | Use bundled or system LLVM library. | | -| [`USE_INTERNAL_MSGPACK_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/msgpack.cmake#L10) | `NOT_UNBUNDLED` | Set to FALSE to use system msgpack library instead of bundled | | -| [`USE_INTERNAL_MYSQL_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/mysqlclient.cmake#L15) | `NOT_UNBUNDLED` | Set to FALSE to use system mysqlclient library instead of bundled | | -| [`USE_INTERNAL_ODBC_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/odbc.cmake#L22) | `NOT_UNBUNDLED` | Use internal ODBC library | | -| [`USE_INTERNAL_ORC_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/orc.cmake#L11) | `ON` | Set to FALSE to use system ORC instead of bundled (experimental set to OFF on your own risk) | | -| [`USE_INTERNAL_PARQUET_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/parquet.cmake#L16) | `NOT_UNBUNDLED` | Set to FALSE to use system parquet library instead of bundled | | -| [`USE_INTERNAL_POCO_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/poco.cmake#L1) | `ON` | Use internal Poco library | | -| [`USE_INTERNAL_PROTOBUF_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/protobuf.cmake#L14) | `NOT_UNBUNDLED` | Set to FALSE to use system protobuf instead of bundled. (Experimental. Set to OFF on your own risk) | Normally we use the internal protobuf library. 
You can set USE_INTERNAL_PROTOBUF_LIBRARY to OFF to force using the external protobuf library, which should be installed in the system in this case. The external protobuf library can be installed in the system by running sudo apt-get install libprotobuf-dev protobuf-compiler libprotoc-dev | -| [`USE_INTERNAL_RAPIDJSON_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/rapidjson.cmake#L9) | `NOT_UNBUNDLED` | Set to FALSE to use system rapidjson library instead of bundled | | -| [`USE_INTERNAL_RDKAFKA_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/rdkafka.cmake#L10) | `NOT_UNBUNDLED` | Set to FALSE to use system librdkafka instead of the bundled | | -| [`USE_INTERNAL_RE2_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/re2.cmake#L1) | `NOT_UNBUNDLED` | Set to FALSE to use system re2 library instead of bundled [slower] | | -| [`USE_INTERNAL_ROCKSDB_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/rocksdb.cmake#L10) | `NOT_UNBUNDLED` | Set to FALSE to use system ROCKSDB library instead of bundled | | -| [`USE_INTERNAL_SNAPPY_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/snappy.cmake#L10) | `NOT_UNBUNDLED` | Set to FALSE to use system snappy library instead of bundled | | -| [`USE_INTERNAL_SPARSEHASH_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/sparsehash.cmake#L1) | `ON` | Set to FALSE to use system sparsehash library instead of bundled | | -| [`USE_INTERNAL_SSL_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/ssl.cmake#L12) | `NOT_UNBUNDLED` | Set to FALSE to use system *ssl library instead of bundled | | -| [`USE_INTERNAL_ZLIB_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/zlib.cmake#L1) | `NOT_UNBUNDLED` | Set to FALSE to use system zlib library instead of bundled | | -| [`USE_INTERNAL_ZSTD_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/zstd.cmake#L1) | `NOT_UNBUNDLED` | Set to FALSE to use system zstd library instead of bundled | | - - -### Other flags - -| Name | Default value | Description | Comment | -|------|---------------|-------------|---------| -| [`ADD_GDB_INDEX_FOR_GOLD`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L195) | `OFF` | Add .gdb-index to resulting binaries for gold linker. | Ignored if `lld` is used | -| [`ARCH_NATIVE`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L248) | `OFF` | Add -march=native compiler flag | | -| [`CLICKHOUSE_SPLIT_BINARY`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L98) | `OFF` | Make several binaries (clickhouse-server, clickhouse-client etc.) instead of one bundled | | -| [`COMPILER_PIPE`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L235) | `ON` | -pipe compiler option | Less `/tmp` usage, more RAM usage. | -| [`ENABLE_CHECK_HEAVY_BUILDS`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L69) | `OFF` | Don't allow C++ translation units to compile too long or to take too much memory while compiling | | -| [`ENABLE_FUZZING`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L115) | `OFF` | Fuzzy testing using libfuzzer | Implies `WITH_COVERAGE` | -| [`ENABLE_LIBRARIES`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L357) | `ON` | Enable all external libraries by default | Turns on all external libs like s3, kafka, ODBC, ... 
| -| [`ENABLE_MULTITARGET_CODE`](https://github.com/clickhouse/clickhouse/blob/master/src/Functions/CMakeLists.txt#L100) | `ON` | Enable platform-dependent code | ClickHouse developers may use platform-dependent code under some macro (e.g. `ifdef ENABLE_MULTITARGET`). If turned ON, this option defines such macro. See `src/Functions/TargetSpecific.h` | -| [`ENABLE_TESTS`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L154) | `ON` | Provide unit_test_dbms target with Google.Test unit tests | If turned `ON`, assumes the user has either the system GTest library or the bundled one. | -| [`ENABLE_THINLTO`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L313) | `ON` | Clang-specific link time optimization | https://clang.llvm.org/docs/ThinLTO.html Applies to clang only. Disabled when building with tests or sanitizers. | -| [`FAIL_ON_UNSUPPORTED_OPTIONS_COMBINATION`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L32) | `ON` | Stop/Fail CMake configuration if some ENABLE_XXX option is defined (either ON or OFF) but is not possible to satisfy | If turned off: e.g. when ENABLE_FOO is ON, but FOO tool was not found, the CMake will continue. | -| [`GLIBC_COMPATIBILITY`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L159) | `ON` | Enable compatibility with older glibc libraries. | Only for Linux, x86_64. Implies `ENABLE_FASTMEMCPY` | -| [`LINKER_NAME`](https://github.com/clickhouse/clickhouse/blob/master/cmake/tools.cmake#L44) | `OFF` | Linker name or full path | Example values: `lld-10`, `gold`. | -| [`LLVM_HAS_RTTI`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/llvm.cmake#L40) | `ON` | Enable if LLVM was build with RTTI enabled | | -| [`MAKE_STATIC_LIBRARIES`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L91) | `USE_STATIC_LIBRARIES` | Disable to make shared libraries | | -| [`PARALLEL_COMPILE_JOBS`](https://github.com/clickhouse/clickhouse/blob/master/cmake/limit_jobs.cmake#L10) | `""` | Maximum number of concurrent compilation jobs | 1 if not set | -| [`PARALLEL_LINK_JOBS`](https://github.com/clickhouse/clickhouse/blob/master/cmake/limit_jobs.cmake#L13) | `""` | Maximum number of concurrent link jobs | 1 if not set | -| [`SANITIZE`](https://github.com/clickhouse/clickhouse/blob/master/cmake/sanitize.cmake#L7) | `""` | Enable one of the code sanitizers | Possible values: - `address` (ASan) - `memory` (MSan) - `thread` (TSan) - `undefined` (UBSan) - "" (no sanitizing) | -| [`SPLIT_SHARED_LIBRARIES`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L96) | `OFF` | Keep all internal libraries as separate .so files | DEVELOPER ONLY. Faster linking if turned on. | -| [`STRIP_DEBUG_SYMBOLS_FUNCTIONS`](https://github.com/clickhouse/clickhouse/blob/master/src/Functions/CMakeLists.txt#L49) | `STRIP_DSF_DEFAULT` | Do not generate debugger info for ClickHouse functions | Provides faster linking and lower binary size. Tradeoff is the inability to debug some source files with e.g. gdb (empty stack frames and no local variables)." | -| [`UNBUNDLED`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L363) | `OFF` | Use system libraries instead of ones in contrib/ | We recommend avoiding this mode for production builds because we can't guarantee all needed libraries exist in your system. This mode exists for enthusiastic developers who are searching for trouble. Useful for maintainers of OS packages. 
| -| [`USE_INCLUDE_WHAT_YOU_USE`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L418) | `OFF` | Automatically reduce unneeded includes in source code (external tool) | https://github.com/include-what-you-use/include-what-you-use | -| [`USE_LIBCXX`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/cxx.cmake#L1) | `NOT_UNBUNDLED` | Use libc++ and libc++abi instead of libstdc++ | | -| [`USE_SENTRY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/sentry.cmake#L13) | `ENABLE_LIBRARIES` | Use Sentry | | -| [`USE_SIMDJSON`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/simdjson.cmake#L1) | `ENABLE_LIBRARIES` | Use simdjson | | -| [`USE_SNAPPY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/snappy.cmake#L1) | `ENABLE_LIBRARIES` | Enable snappy library | | -| [`USE_STATIC_LIBRARIES`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L90) | `ON` | Disable to use shared libraries | | -| [`USE_UNWIND`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/unwind.cmake#L1) | `ENABLE_LIBRARIES` | Enable libunwind (better stacktraces) | | -| [`WERROR`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L373) | `OFF` | Enable -Werror compiler option | Using system libs can cause a lot of warnings in includes (on macro expansion). | -| [`WEVERYTHING`](https://github.com/clickhouse/clickhouse/blob/master/cmake/warnings.cmake#L22) | `ON` | Enable -Weverything option with some exceptions. | Add some warnings that are not available even with -Wall -Wextra -Wpedantic. Intended for exploration of new compiler warnings that may be found useful. Applies to clang only | -| [`WITH_COVERAGE`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L274) | `OFF` | Profile the resulting binary/binaries | Compiler-specific coverage flags e.g. -fcoverage-mapping for gcc | - -## Developer's guide for adding new CMake options - -### Don't be obvious. Be informative. - -Bad: -```cmake -option (ENABLE_TESTS "Enables testing" OFF) -``` - -This description is quite useless as is neither gives the viewer any additional information nor explains the option purpose. - -Better: - -```cmake -option(ENABLE_TESTS "Provide unit_test_dbms target with Google.test unit tests" OFF) -``` - -If the option's purpose can't be guessed by its name, or the purpose guess may be misleading, or option has some -pre-conditions, leave a comment above the `option()` line and explain what it does. -The best way would be linking the docs page (if it exists). -The comment is parsed into a separate column (see below). - -Even better: - -```cmake -# implies ${TESTS_ARE_ENABLED} -# see tests/CMakeLists.txt for implementation detail. -option(ENABLE_TESTS "Provide unit_test_dbms target with Google.test unit tests" OFF) -``` - -### If the option's state could produce unwanted (or unusual) result, explicitly warn the user. - -Suppose you have an option that may strip debug symbols from the ClickHouse's part. -This can speed up the linking process, but produces a binary that cannot be debugged. -In that case, prefer explicitly raising a warning telling the developer that he may be doing something wrong. -Also, such options should be disabled if applies. - -Bad: -```cmake -option(STRIP_DEBUG_SYMBOLS_FUNCTIONS - "Do not generate debugger info for ClickHouse functions. 
- ${STRIP_DSF_DEFAULT}) - -if (STRIP_DEBUG_SYMBOLS_FUNCTIONS) - target_compile_options(clickhouse_functions PRIVATE "-g0") -endif() - -``` -Better: - -```cmake -# Provides faster linking and lower binary size. -# Tradeoff is the inability to debug some source files with e.g. gdb -# (empty stack frames and no local variables)." -option(STRIP_DEBUG_SYMBOLS_FUNCTIONS - "Do not generate debugger info for ClickHouse functions." - ${STRIP_DSF_DEFAULT}) - -if (STRIP_DEBUG_SYMBOLS_FUNCTIONS) - message(WARNING "Not generating debugger info for ClickHouse functions") - target_compile_options(clickhouse_functions PRIVATE "-g0") -endif() -``` - -### In the option's description, explain WHAT the option does rather than WHY it does something. - -The WHY explanation should be placed in the comment. -You may find that the option's name is self-descriptive. - -Bad: - -```cmake -option(ENABLE_THINLTO "Enable Thin LTO. Only applicable for clang. It's also suppressed when building with tests or sanitizers." ON) -``` - -Better: - -```cmake -# Only applicable for clang. -# Turned off when building with tests or sanitizers. -option(ENABLE_THINLTO "Clang-specific link time optimisation" ON). -``` - -### Don't assume other developers know as much as you do. - -In ClickHouse, there are many tools used that an ordinary developer may not know. If you are in doubt, give a link to -the tool's docs. It won't take much of your time. - -Bad: - -```cmake -option(ENABLE_THINLTO "Enable Thin LTO. Only applicable for clang. It's also suppressed when building with tests or sanitizers." ON) -``` - -Better (combined with the above hint): - -```cmake -# https://clang.llvm.org/docs/ThinLTO.html -# Only applicable for clang. -# Turned off when building with tests or sanitizers. -option(ENABLE_THINLTO "Clang-specific link time optimisation" ON). -``` - -Other example, bad: - -```cmake -option (USE_INCLUDE_WHAT_YOU_USE "Use 'include-what-you-use' tool" OFF) -``` - -Better: - -```cmake -# https://github.com/include-what-you-use/include-what-you-use -option (USE_INCLUDE_WHAT_YOU_USE "Reduce unneeded #include s (external tool)" OFF) -``` - -### Prefer consistent default values. - -CMake allows you to pass a plethora of values representing boolean `true/false`, e.g. `1, ON, YES, ...`. -Prefer the `ON/OFF` values, if possible. 
diff --git a/docs/en/development/contrib.md b/docs/en/development/contrib.md index 76a2f647231..64ca2387029 100644 --- a/docs/en/development/contrib.md +++ b/docs/en/development/contrib.md @@ -5,36 +5,87 @@ toc_title: Third-Party Libraries Used # Third-Party Libraries Used {#third-party-libraries-used} -| Library | License | -|---------------------|----------------------------------------------------------------------------------------------------------------------------------------------| -| base64 | [BSD 2-Clause License](https://github.com/aklomp/base64/blob/a27c565d1b6c676beaf297fe503c4518185666f7/LICENSE) | -| boost | [Boost Software License 1.0](https://github.com/ClickHouse-Extras/boost-extra/blob/6883b40449f378019aec792f9983ce3afc7ff16e/LICENSE_1_0.txt) | -| brotli | [MIT](https://github.com/google/brotli/blob/master/LICENSE) | -| capnproto | [MIT](https://github.com/capnproto/capnproto/blob/master/LICENSE) | -| cctz | [Apache License 2.0](https://github.com/google/cctz/blob/4f9776a310f4952454636363def82c2bf6641d5f/LICENSE.txt) | -| double-conversion | [BSD 3-Clause License](https://github.com/google/double-conversion/blob/cf2f0f3d547dc73b4612028a155b80536902ba02/LICENSE) | -| FastMemcpy | [MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libmemcpy/impl/LICENSE) | -| googletest | [BSD 3-Clause License](https://github.com/google/googletest/blob/master/LICENSE) | -| h3 | [Apache License 2.0](https://github.com/uber/h3/blob/master/LICENSE) | -| hyperscan | [BSD 3-Clause License](https://github.com/intel/hyperscan/blob/master/LICENSE) | -| libcxxabi | [BSD + MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libglibc-compatibility/libcxxabi/LICENSE.TXT) | -| libdivide | [Zlib License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libdivide/LICENSE.txt) | -| libgsasl | [LGPL v2.1](https://github.com/ClickHouse-Extras/libgsasl/blob/3b8948a4042e34fb00b4fb987535dc9e02e39040/LICENSE) | -| libhdfs3 | [Apache License 2.0](https://github.com/ClickHouse-Extras/libhdfs3/blob/bd6505cbb0c130b0db695305b9a38546fa880e5a/LICENSE.txt) | -| libmetrohash | [Apache License 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libmetrohash/LICENSE) | -| libpcg-random | [Apache License 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libpcg-random/LICENSE-APACHE.txt) | -| libressl | [OpenSSL License](https://github.com/ClickHouse-Extras/ssl/blob/master/COPYING) | -| librdkafka | [BSD 2-Clause License](https://github.com/edenhill/librdkafka/blob/363dcad5a23dc29381cc626620e68ae418b3af19/LICENSE) | -| libwidechar_width | [CC0 1.0 Universal](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libwidechar_width/LICENSE) | -| llvm | [BSD 3-Clause License](https://github.com/ClickHouse-Extras/llvm/blob/163def217817c90fb982a6daf384744d8472b92b/llvm/LICENSE.TXT) | -| lz4 | [BSD 2-Clause License](https://github.com/lz4/lz4/blob/c10863b98e1503af90616ae99725ecd120265dfb/LICENSE) | -| mariadb-connector-c | [LGPL v2.1](https://github.com/ClickHouse-Extras/mariadb-connector-c/blob/3.1/COPYING.LIB) | -| murmurhash | [Public Domain](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/murmurhash/LICENSE) | -| pdqsort | [Zlib License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/pdqsort/license.txt) | -| poco | [Boost Software License - Version 1.0](https://github.com/ClickHouse-Extras/poco/blob/fe5505e56c27b6ecb0dcbc40c49dc2caf4e9637f/LICENSE) | -| protobuf | [BSD 3-Clause 
License](https://github.com/ClickHouse-Extras/protobuf/blob/12735370922a35f03999afff478e1c6d7aa917a4/LICENSE) | -| re2 | [BSD 3-Clause License](https://github.com/google/re2/blob/7cf8b88e8f70f97fd4926b56aa87e7f53b2717e0/LICENSE) | -| sentry-native | [MIT License](https://github.com/getsentry/sentry-native/blob/master/LICENSE) | -| UnixODBC | [LGPL v2.1](https://github.com/ClickHouse-Extras/UnixODBC/tree/b0ad30f7f6289c12b76f04bfb9d466374bb32168) | -| zlib-ng | [Zlib License](https://github.com/ClickHouse-Extras/zlib-ng/blob/develop/LICENSE.md) | -| zstd | [BSD 3-Clause License](https://github.com/facebook/zstd/blob/dev/LICENSE) | +The list of third-party libraries can be obtained by the following query: + +``` +SELECT library_name, license_type, license_path FROM system.licenses ORDER BY library_name COLLATE 'en' +``` + +[Example](https://gh-api.clickhouse.tech/play?user=play#U0VMRUNUIGxpYnJhcnlfbmFtZSwgbGljZW5zZV90eXBlLCBsaWNlbnNlX3BhdGggRlJPTSBzeXN0ZW0ubGljZW5zZXMgT1JERVIgQlkgbGlicmFyeV9uYW1lIENPTExBVEUgJ2VuJw==) + +| library_name | license_type | license_path | +|:-|:-|:-| +| abseil-cpp | Apache | /contrib/abseil-cpp/LICENSE | +| AMQP-CPP | Apache | /contrib/AMQP-CPP/LICENSE | +| arrow | Apache | /contrib/arrow/LICENSE.txt | +| avro | Apache | /contrib/avro/LICENSE.txt | +| aws | Apache | /contrib/aws/LICENSE.txt | +| aws-c-common | Apache | /contrib/aws-c-common/LICENSE | +| aws-c-event-stream | Apache | /contrib/aws-c-event-stream/LICENSE | +| aws-checksums | Apache | /contrib/aws-checksums/LICENSE | +| base64 | BSD 2-clause | /contrib/base64/LICENSE | +| boost | Boost | /contrib/boost/LICENSE_1_0.txt | +| boringssl | BSD | /contrib/boringssl/LICENSE | +| brotli | MIT | /contrib/brotli/LICENSE | +| capnproto | MIT | /contrib/capnproto/LICENSE | +| cassandra | Apache | /contrib/cassandra/LICENSE.txt | +| cctz | Apache | /contrib/cctz/LICENSE.txt | +| cityhash102 | MIT | /contrib/cityhash102/COPYING | +| cppkafka | BSD 2-clause | /contrib/cppkafka/LICENSE | +| croaring | Apache | /contrib/croaring/LICENSE | +| curl | Apache | /contrib/curl/docs/LICENSE-MIXING.md | +| cyrus-sasl | BSD 2-clause | /contrib/cyrus-sasl/COPYING | +| double-conversion | BSD 3-clause | /contrib/double-conversion/LICENSE | +| dragonbox | Apache | /contrib/dragonbox/LICENSE-Apache2-LLVM | +| fast_float | Apache | /contrib/fast_float/LICENSE | +| fastops | MIT | /contrib/fastops/LICENSE | +| flatbuffers | Apache | /contrib/flatbuffers/LICENSE.txt | +| fmtlib | Unknown | /contrib/fmtlib/LICENSE.rst | +| gcem | Apache | /contrib/gcem/LICENSE | +| googletest | BSD 3-clause | /contrib/googletest/LICENSE | +| grpc | Apache | /contrib/grpc/LICENSE | +| h3 | Apache | /contrib/h3/LICENSE | +| hyperscan | Boost | /contrib/hyperscan/LICENSE | +| icu | Public Domain | /contrib/icu/icu4c/LICENSE | +| icudata | Public Domain | /contrib/icudata/LICENSE | +| jemalloc | BSD 2-clause | /contrib/jemalloc/COPYING | +| krb5 | MIT | /contrib/krb5/src/lib/gssapi/LICENSE | +| libc-headers | LGPL | /contrib/libc-headers/LICENSE | +| libcpuid | BSD 2-clause | /contrib/libcpuid/COPYING | +| libcxx | Apache | /contrib/libcxx/LICENSE.TXT | +| libcxxabi | Apache | /contrib/libcxxabi/LICENSE.TXT | +| libdivide | zLib | /contrib/libdivide/LICENSE.txt | +| libfarmhash | MIT | /contrib/libfarmhash/COPYING | +| libgsasl | LGPL | /contrib/libgsasl/LICENSE | +| libhdfs3 | Apache | /contrib/libhdfs3/LICENSE.txt | +| libmetrohash | Apache | /contrib/libmetrohash/LICENSE | +| libpq | Unknown | /contrib/libpq/COPYRIGHT | +| libpqxx | BSD 3-clause | 
/contrib/libpqxx/COPYING | +| librdkafka | MIT | /contrib/librdkafka/LICENSE.murmur2 | +| libunwind | Apache | /contrib/libunwind/LICENSE.TXT | +| libuv | BSD | /contrib/libuv/LICENSE | +| llvm | Apache | /contrib/llvm/llvm/LICENSE.TXT | +| lz4 | BSD | /contrib/lz4/LICENSE | +| mariadb-connector-c | LGPL | /contrib/mariadb-connector-c/COPYING.LIB | +| miniselect | Boost | /contrib/miniselect/LICENSE_1_0.txt | +| msgpack-c | Boost | /contrib/msgpack-c/LICENSE_1_0.txt | +| murmurhash | Public Domain | /contrib/murmurhash/LICENSE | +| NuRaft | Apache | /contrib/NuRaft/LICENSE | +| openldap | Unknown | /contrib/openldap/LICENSE | +| orc | Apache | /contrib/orc/LICENSE | +| poco | Boost | /contrib/poco/LICENSE | +| protobuf | BSD 3-clause | /contrib/protobuf/LICENSE | +| rapidjson | MIT | /contrib/rapidjson/bin/jsonschema/LICENSE | +| re2 | BSD 3-clause | /contrib/re2/LICENSE | +| replxx | BSD 3-clause | /contrib/replxx/LICENSE.md | +| rocksdb | BSD 3-clause | /contrib/rocksdb/LICENSE.leveldb | +| sentry-native | MIT | /contrib/sentry-native/LICENSE | +| simdjson | Apache | /contrib/simdjson/LICENSE | +| snappy | Public Domain | /contrib/snappy/COPYING | +| sparsehash-c11 | BSD 3-clause | /contrib/sparsehash-c11/LICENSE | +| stats | Apache | /contrib/stats/LICENSE | +| thrift | Apache | /contrib/thrift/LICENSE | +| unixodbc | LGPL | /contrib/unixodbc/COPYING | +| xz | Public Domain | /contrib/xz/COPYING | +| zlib-ng | zLib | /contrib/zlib-ng/LICENSE.md | +| zstd | BSD | /contrib/zstd/LICENSE | diff --git a/docs/en/development/developer-instruction.md b/docs/en/development/developer-instruction.md index 5511e8e19c7..ac6d4a2b563 100644 --- a/docs/en/development/developer-instruction.md +++ b/docs/en/development/developer-instruction.md @@ -15,7 +15,7 @@ ClickHouse cannot work or build on a 32-bit system. You should acquire access to To start working with ClickHouse repository you will need a GitHub account. -You probably already have one, but if you don’t, please register at https://github.com. In case you do not have SSH keys, you should generate them and then upload them on GitHub. It is required for sending over your patches. It is also possible to use the same SSH keys that you use with any other SSH servers - probably you already have those. +You probably already have one, but if you do not, please register at https://github.com. In case you do not have SSH keys, you should generate them and then upload them on GitHub. It is required for sending over your patches. It is also possible to use the same SSH keys that you use with any other SSH servers - probably you already have those. Create a fork of ClickHouse repository. To do that please click on the “fork” button in the upper right corner at https://github.com/ClickHouse/ClickHouse. It will fork your own copy of ClickHouse/ClickHouse to your account. @@ -131,17 +131,18 @@ ClickHouse uses several external libraries for building. All of them do not need ## C++ Compiler {#c-compiler} -Compilers GCC starting from version 10 and Clang version 8 or above are supported for building ClickHouse. +Only Clang starting from version 11 is supported for building ClickHouse. -Official Yandex builds currently use GCC because it generates machine code of slightly better performance (yielding a difference of up to several percent according to our benchmarks). And Clang is more convenient for development usually. Though, our continuous integration (CI) platform runs checks for about a dozen of build combinations. +Clang should be used instead of gcc.
Nevertheless, our continuous integration (CI) platform runs checks for about a dozen build combinations. -To install GCC on Ubuntu run: `sudo apt install gcc g++` +On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/)) -Check the version of gcc: `gcc --version`. If it is below 10, then follow the instruction here: https://clickhouse.tech/docs/en/development/build/#install-gcc-10. +```bash +sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" +``` -Mac OS X build is supported only for Clang. Just run `brew install llvm` +Mac OS X build is also supported. Just run `brew install llvm` -If you decide to use Clang, you can also install `libc++` and `lld`, if you know what it is. Using `ccache` is also recommended. ## The Building Process {#the-building-process} @@ -152,14 +153,7 @@ Now that you are ready to build ClickHouse we recommend you to create a separate You can have several different directories (build_release, build_debug, etc.) for different types of build. -While inside the `build` directory, configure your build by running CMake. Before the first run, you need to define environment variables that specify compiler (version 10 gcc compiler in this example). - -Linux: - - export CC=gcc-10 CXX=g++-10 - cmake .. - -Mac OS X: +While inside the `build` directory, configure your build by running CMake. Before the first run, you need to define environment variables that specify the compiler. export CC=clang CXX=clang++ cmake .. diff --git a/docs/en/development/style.md b/docs/en/development/style.md index 4c620c44aef..2151735c2f4 100644 --- a/docs/en/development/style.md +++ b/docs/en/development/style.md @@ -195,7 +195,7 @@ std::cerr << static_cast(c) << std::endl; The same is true for small methods in any classes or structs. -For templated classes and structs, don’t separate the method declarations from the implementation (because otherwise they must be defined in the same translation unit). +For templated classes and structs, do not separate the method declarations from the implementation (because otherwise they must be defined in the same translation unit). **31.** You can wrap lines at 140 characters, instead of 80. @@ -442,7 +442,7 @@ Use `RAII` and see above. **3.** Error handling. -Use exceptions. In most cases, you only need to throw an exception, and don’t need to catch it (because of `RAII`). +Use exceptions. In most cases, you only need to throw an exception, and do not need to catch it (because of `RAII`). In offline data processing applications, it’s often acceptable to not catch exceptions. @@ -599,7 +599,7 @@ public: There is no need to use a separate `namespace` for application code. -Small libraries don’t need this, either. +Small libraries do not need this, either. For medium to large libraries, put everything in a `namespace`. @@ -701,7 +701,7 @@ But other things being equal, cross-platform or portable code is preferred. **2.** Language: C++20 (see the list of available [C++20 features](https://en.cppreference.com/w/cpp/compiler_support#C.2B.2B20_features)). -**3.** Compiler: `gcc`. At this time (August 2020), the code is compiled using version 9.3. (It can also be compiled using `clang 8`.) +**3.** Compiler: `clang`. At this time (April 2021), the code is compiled using clang version 11. (It can also be compiled using `gcc` version 10, but it is untested and not suitable for production usage.) The standard library is used (`libc++`). @@ -711,7 +711,7 @@ The standard library is used (`libc++`).
The CPU instruction set is the minimum supported set among our servers. Currently, it is SSE 4.2. -**6.** Use `-Wall -Wextra -Werror` compilation flags. +**6.** Use `-Wall -Wextra -Werror` compilation flags. Also, `-Weverything` is used, with a few exceptions. **7.** Use static linking with all libraries except those that are difficult to connect to statically (see the output of the `ldd` command). @@ -755,9 +755,9 @@ If there is a good solution already available, then use it, even if it means you (But be prepared to remove bad libraries from code.) -**3.** You can install a library that isn’t in the packages, if the packages don’t have what you need or have an outdated version or the wrong type of compilation. +**3.** You can install a library that is not in the packages, if the packages do not have what you need or have an outdated version or the wrong type of compilation. -**4.** If the library is small and doesn’t have its own complex build system, put the source files in the `contrib` folder. +**4.** If the library is small and does not have its own complex build system, put the source files in the `contrib` folder. **5.** Preference is always given to libraries that are already in use. diff --git a/docs/en/development/tests.md b/docs/en/development/tests.md index 7547497b9af..4231bda6c35 100644 --- a/docs/en/development/tests.md +++ b/docs/en/development/tests.md @@ -35,7 +35,7 @@ Tests should use (create, drop, etc) only tables in `test` database that is assu ### Choosing the Test Name -The name of the test starts with a five-digit prefix followed by a descriptive name, such as `00422_hash_function_constexpr.sql`. To choose the prefix, find the largest prefix already present in the directory, and increment it by one. In the meantime, some other tests might be added with the same numeric prefix, but this is OK and doesn't lead to any problems, you don't have to change it later. +The name of the test starts with a five-digit prefix followed by a descriptive name, such as `00422_hash_function_constexpr.sql`. To choose the prefix, find the largest prefix already present in the directory, and increment it by one. In the meantime, some other tests might be added with the same numeric prefix, but this is OK and does not lead to any problems; you do not have to change it later. Some tests are marked with `zookeeper`, `shard` or `long` in their names. `zookeeper` is for tests that are using ZooKeeper. `shard` is for tests that requires server to listen `127.0.0.*`; `distributed` or `global` have the same meaning. `long` is for tests that run slightly longer that one second. You can disable these groups of tests using `--no-zookeeper`, `--no-shard` and `--no-long` options, respectively. Make sure to add a proper prefix to your test name if it needs ZooKeeper or distributed queries. @@ -51,7 +51,7 @@ Do not check for a particular wording of error message, it may change in the fut ### Testing a Distributed Query -If you want to use distributed queries in functional tests, you can leverage `remote` table function with `127.0.0.{1..2}` addresses for the server to query itself; or you can use predefined test clusters in server configuration file like `test_shard_localhost`. Remember to add the words `shard` or `distributed` to the test name, so that it is ran in CI in correct configurations, where the server is configured to support distributed queries.
+If you want to use distributed queries in functional tests, you can leverage the `remote` table function with `127.0.0.{1..2}` addresses for the server to query itself; or you can use predefined test clusters in the server configuration file, like `test_shard_localhost`. Remember to add the words `shard` or `distributed` to the test name, so that it is run in CI in the correct configurations, where the server is configured to support distributed queries.

## Known Bugs {#known-bugs}

@@ -60,11 +60,11 @@ If we know some bugs that can be easily reproduced by functional tests, we place

## Integration Tests {#integration-tests}

-Integration tests allow to test ClickHouse in clustered configuration and ClickHouse interaction with other servers like MySQL, Postgres, MongoDB. They are useful to emulate network splits, packet drops, etc. These tests are run under Docker and create multiple containers with various software.
+Integration tests allow testing ClickHouse in a clustered configuration and ClickHouse interaction with other servers like MySQL, Postgres, MongoDB. They are useful to emulate network splits, packet drops, etc. These tests are run under Docker and create multiple containers with various software.

See `tests/integration/README.md` on how to run these tests.

-Note that integration of ClickHouse with third-party drivers is not tested. Also we currently don’t have integration tests with our JDBC and ODBC drivers.
+Note that integration of ClickHouse with third-party drivers is not tested. Also, we currently do not have integration tests with our JDBC and ODBC drivers.

## Unit Tests {#unit-tests}

@@ -123,7 +123,7 @@ Example with gdb:

$ sudo -u clickhouse gdb --args /usr/bin/clickhouse server --config-file /etc/clickhouse-server/config.xml
```

-If the system clickhouse-server is already running and you don’t want to stop it, you can change port numbers in your `config.xml` (or override them in a file in `config.d` directory), provide appropriate data path, and run it.
+If the system clickhouse-server is already running and you do not want to stop it, you can change port numbers in your `config.xml` (or override them in a file in `config.d` directory), provide an appropriate data path, and run it.

`clickhouse` binary has almost no dependencies and works across a wide range of Linux distributions. For a quick and dirty test of your changes on a server, you can simply `scp` your freshly built `clickhouse` binary to your server and then run it as in the examples above.

@@ -161,7 +161,7 @@ $ clickhouse benchmark --concurrency 16 < queries.tsv

Then leave it for a night or weekend and go take a rest.

-You should check that `clickhouse-server` doesn’t crash, memory footprint is bounded and performance not degrading over time.
+You should check that `clickhouse-server` does not crash, the memory footprint is bounded, and performance is not degrading over time.

Precise query execution timings are not recorded and not compared due to high variability of queries and environment.

@@ -230,7 +230,7 @@ Fuzzers are not built by default. To build fuzzers both `-DENABLE_FUZZING=1` and

We recommend disabling Jemalloc while building fuzzers. Configuration used to integrate ClickHouse fuzzing to Google OSS-Fuzz can be found at `docker/fuzz`.

-We also use simple fuzz test to generate random SQL queries and to check that the server doesn’t die executing them.
+We also use a simple fuzz test to generate random SQL queries and to check that the server does not die executing them.
You can find it in `00746_sql_fuzzy.pl`.
This test should be run continuously (overnight and longer).

We also use a sophisticated AST-based query fuzzer that is able to find a huge amount of corner cases. It does random permutations and substitutions in query ASTs. It remembers AST nodes from previous tests to use them for fuzzing of subsequent tests while processing them in random order. You can learn more about this fuzzer in [this blog article](https://clickhouse.tech/blog/en/2021/fuzzing-clickhouse/).

@@ -332,7 +332,7 @@ We run tests with Yandex internal CI and job automation system named “Sandbox”

Build jobs and tests are run in Sandbox on per commit basis. Resulting packages and test results are published in GitHub and can be downloaded by direct links. Artifacts are stored for several months. When you send a pull request on GitHub, we tag it as “can be tested” and our CI system will build ClickHouse packages (release, debug, with address sanitizer, etc) for you.

-We don’t use Travis CI due to the limit on time and computational power.
-We don’t use Jenkins. It was used before and now we are happy we are not using Jenkins.
+We do not use Travis CI due to the limit on time and computational power.
+We do not use Jenkins. It was used before and now we are happy we are not using Jenkins.

[Original article](https://clickhouse.tech/docs/en/development/tests/)
diff --git a/docs/en/engines/database-engines/atomic.md b/docs/en/engines/database-engines/atomic.md
index d8ad18daec2..4f5f69a5ab7 100644
--- a/docs/en/engines/database-engines/atomic.md
+++ b/docs/en/engines/database-engines/atomic.md
@@ -3,15 +3,52 @@ toc_priority: 32
toc_title: Atomic
---

-
# Atomic {#atomic}

-It supports non-blocking `DROP` and `RENAME TABLE` queries and atomic `EXCHANGE TABLES t1 AND t2` queries. `Atomic` database engine is used by default.
+It supports non-blocking [DROP TABLE](#drop-detach-table) and [RENAME TABLE](#rename-table) queries and atomic [EXCHANGE TABLES t1 AND t2](#exchange-tables) queries. The `Atomic` database engine is used by default.

## Creating a Database {#creating-a-database}

-```sql
-CREATE DATABASE test ENGINE = Atomic;
+``` sql
+CREATE DATABASE test [ENGINE = Atomic];
```

-[Original article](https://clickhouse.tech/docs/en/engines/database-engines/atomic/)
+## Specifics and recommendations {#specifics-and-recommendations}
+
+### Table UUID {#table-uuid}
+
+All tables in database `Atomic` have a persistent [UUID](../../sql-reference/data-types/uuid.md) and store their data in the directory `/clickhouse_path/store/xxx/xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy/`, where `xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy` is the UUID of the table.
+Usually, the UUID is generated automatically, but the user can also specify it explicitly when creating the table (this is not recommended). To display the `SHOW CREATE` query with the UUID, you can use the [show_table_uuid_in_table_create_query_if_not_nil](../../operations/settings/settings.md#show_table_uuid_in_table_create_query_if_not_nil) setting. For example:
+
+```sql
+CREATE TABLE name UUID '28f1c61c-2970-457a-bffe-454156ddcfef' (n UInt64) ENGINE = ...;
+```
+
+### RENAME TABLE {#rename-table}
+
+`RENAME` queries are performed without changing the UUID or moving the table data. These queries do not wait for the completion of queries using the table and are executed instantly.
+
+### DROP/DETACH TABLE {#drop-detach-table}
+
+On `DROP TABLE`, no data is removed; the `Atomic` database just marks the table as dropped by moving its metadata to `/clickhouse_path/metadata_dropped/` and notifies a background thread.
The delay before the final table data deletion is specified by the [database_atomic_delay_before_drop_table_sec](../../operations/server-configuration-parameters/settings.md#database_atomic_delay_before_drop_table_sec) setting.
+You can specify synchronous mode using the `SYNC` modifier. Use the [database_atomic_wait_for_drop_and_detach_synchronously](../../operations/settings/settings.md#database_atomic_wait_for_drop_and_detach_synchronously) setting to do this. In this case, `DROP` waits for running `SELECT`, `INSERT` and other queries that are using the table to finish. The table will actually be removed when it is no longer in use.
+
+### EXCHANGE TABLES {#exchange-tables}
+
+The `EXCHANGE` query swaps tables atomically. So instead of this non-atomic operation:
+
+```sql
+RENAME TABLE new_table TO tmp, old_table TO new_table, tmp TO old_table;
+```
+
+you can use one atomic query:
+
+``` sql
+EXCHANGE TABLES new_table AND old_table;
+```
+
+### ReplicatedMergeTree in Atomic Database {#replicatedmergetree-in-atomic-database}
+
+For [ReplicatedMergeTree](../table-engines/mergetree-family/replication.md#table_engines-replication) tables, it is recommended not to specify the engine parameters (the path in ZooKeeper and the replica name). In this case, the configuration parameters [default_replica_path](../../operations/server-configuration-parameters/settings.md#default_replica_path) and [default_replica_name](../../operations/server-configuration-parameters/settings.md#default_replica_name) will be used. If you want to specify the engine parameters explicitly, it is recommended to use the `{uuid}` macro. This ensures that unique paths are automatically generated for each table in ZooKeeper.
+
+## See Also
+
+- [system.databases](../../operations/system-tables/databases.md) system table
diff --git a/docs/en/engines/database-engines/index.md b/docs/en/engines/database-engines/index.md
index 2db11998483..b6892099378 100644
--- a/docs/en/engines/database-engines/index.md
+++ b/docs/en/engines/database-engines/index.md
@@ -18,4 +18,8 @@ You can also use the following database engines:

- [Lazy](../../engines/database-engines/lazy.md)

+- [Atomic](../../engines/database-engines/atomic.md)
+
+- [PostgreSQL](../../engines/database-engines/postgresql.md)
+
[Original article](https://clickhouse.tech/docs/en/database_engines/)
diff --git a/docs/en/engines/database-engines/postgresql.md b/docs/en/engines/database-engines/postgresql.md
new file mode 100644
index 00000000000..1fa86b7ac21
--- /dev/null
+++ b/docs/en/engines/database-engines/postgresql.md
@@ -0,0 +1,138 @@
+---
+toc_priority: 35
+toc_title: PostgreSQL
+---
+
+# PostgreSQL {#postgresql}
+
+Allows connecting to databases on a remote [PostgreSQL](https://www.postgresql.org) server. Supports read and write operations (`SELECT` and `INSERT` queries) to exchange data between ClickHouse and PostgreSQL.
+
+Gives real-time access to the table list and table structure of the remote PostgreSQL database with the help of `SHOW TABLES` and `DESCRIBE TABLE` queries.
+
+Supports table structure modifications (`ALTER TABLE ... ADD|DROP COLUMN`). If the `use_table_cache` parameter (see the Engine Parameters below) is set to `1`, the table structure is cached and not checked for being modified, but can be updated with `DETACH` and `ATTACH` queries.
+
+## Creating a Database {#creating-a-database}
+
+``` sql
+CREATE DATABASE test_database
+ENGINE = PostgreSQL('host:port', 'database', 'user', 'password'[, `use_table_cache`]);
+```
+
+**Engine Parameters**
+
+- `host:port` — PostgreSQL server address.
+- `database` — Remote database name.
+- `user` — PostgreSQL user.
+- `password` — User password.
+- `use_table_cache` — Defines whether the database table structure is cached. Optional. Default value: `0`.
+
+## Data Types Support {#data_types-support}
+
+| PostgreSQL       | ClickHouse                                                   |
+|------------------|--------------------------------------------------------------|
+| DATE             | [Date](../../sql-reference/data-types/date.md)               |
+| TIMESTAMP        | [DateTime](../../sql-reference/data-types/datetime.md)       |
+| REAL             | [Float32](../../sql-reference/data-types/float.md)           |
+| DOUBLE           | [Float64](../../sql-reference/data-types/float.md)           |
+| DECIMAL, NUMERIC | [Decimal](../../sql-reference/data-types/decimal.md)         |
+| SMALLINT         | [Int16](../../sql-reference/data-types/int-uint.md)          |
+| INTEGER          | [Int32](../../sql-reference/data-types/int-uint.md)          |
+| BIGINT           | [Int64](../../sql-reference/data-types/int-uint.md)          |
+| SERIAL           | [UInt32](../../sql-reference/data-types/int-uint.md)         |
+| BIGSERIAL        | [UInt64](../../sql-reference/data-types/int-uint.md)         |
+| TEXT, CHAR       | [String](../../sql-reference/data-types/string.md)           |
+| INTEGER          | Nullable([Int32](../../sql-reference/data-types/int-uint.md))|
+| ARRAY            | [Array](../../sql-reference/data-types/array.md)             |
+
+
+## Examples of Use {#examples-of-use}
+
+Database in ClickHouse, exchanging data with the PostgreSQL server:
+
+``` sql
+CREATE DATABASE test_database
+ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword', 1);
+```
+
+``` sql
+SHOW DATABASES;
+```
+
+``` text
+┌─name──────────┐
+│ default       │
+│ test_database │
+│ system        │
+└───────────────┘
+```
+
+``` sql
+SHOW TABLES FROM test_database;
+```
+
+``` text
+┌─name───────┐
+│ test_table │
+└────────────┘
+```
+
+Reading data from the PostgreSQL table:
+
+``` sql
+SELECT * FROM test_database.test_table;
+```
+
+``` text
+┌─id─┬─value─┐
+│  1 │     2 │
+└────┴───────┘
+```
+
+Writing data to the PostgreSQL table:
+
+``` sql
+INSERT INTO test_database.test_table VALUES (3,4);
+SELECT * FROM test_database.test_table;
+```
+
+``` text
+┌─int_id─┬─value─┐
+│      1 │     2 │
+│      3 │     4 │
+└────────┴───────┘
+```
+
+Suppose the table structure was modified in PostgreSQL:
+
+``` sql
+postgres=# ALTER TABLE test_table ADD COLUMN data Text
+```
+
+As the `use_table_cache` parameter was set to `1` when the database was created, the table structure in ClickHouse was cached and therefore not modified:
+
+``` sql
+DESCRIBE TABLE test_database.test_table;
+```
+``` text
+┌─name───┬─type──────────────┐
+│ id     │ Nullable(Integer) │
+│ value  │ Nullable(Integer) │
+└────────┴───────────────────┘
+```
+
+After detaching the table and attaching it again, the structure was updated:
+
+``` sql
+DETACH TABLE test_database.test_table;
+ATTACH TABLE test_database.test_table;
+DESCRIBE TABLE test_database.test_table;
+```
+``` text
+┌─name───┬─type──────────────┐
+│ id     │ Nullable(Integer) │
+│ value  │ Nullable(Integer) │
+│ data   │ Nullable(String)  │
+└────────┴───────────────────┘
+```
+
+[Original article](https://clickhouse.tech/docs/en/database-engines/postgresql/)
diff --git a/docs/en/engines/table-engines/index.md b/docs/en/engines/table-engines/index.md
index e60cdf3c899..13b3395e15b 100644
--- a/docs/en/engines/table-engines/index.md
+++ b/docs/en/engines/table-engines/index.md
@@ -47,12 +47,17 @@ Engines for communicating with other data storage and processing systems.
Engines in the family: -- [Kafka](../../engines/table-engines/integrations/kafka.md#kafka) -- [MySQL](../../engines/table-engines/integrations/mysql.md#mysql) -- [ODBC](../../engines/table-engines/integrations/odbc.md#table-engine-odbc) -- [JDBC](../../engines/table-engines/integrations/jdbc.md#table-engine-jdbc) -- [HDFS](../../engines/table-engines/integrations/hdfs.md#hdfs) -- [S3](../../engines/table-engines/integrations/s3.md#table-engine-s3) + +- [ODBC](../../engines/table-engines/integrations/odbc.md) +- [JDBC](../../engines/table-engines/integrations/jdbc.md) +- [MySQL](../../engines/table-engines/integrations/mysql.md) +- [MongoDB](../../engines/table-engines/integrations/mongodb.md) +- [HDFS](../../engines/table-engines/integrations/hdfs.md) +- [S3](../../engines/table-engines/integrations/s3.md) +- [Kafka](../../engines/table-engines/integrations/kafka.md) +- [EmbeddedRocksDB](../../engines/table-engines/integrations/embedded-rocksdb.md) +- [RabbitMQ](../../engines/table-engines/integrations/rabbitmq.md) +- [PostgreSQL](../../engines/table-engines/integrations/postgresql.md) ### Special Engines {#special-engines} @@ -77,8 +82,8 @@ Virtual column is an integral table engine attribute that is defined in the engi You shouldn’t specify virtual columns in the `CREATE TABLE` query and you can’t see them in `SHOW CREATE TABLE` and `DESCRIBE TABLE` query results. Virtual columns are also read-only, so you can’t insert data into virtual columns. -To select data from a virtual column, you must specify its name in the `SELECT` query. `SELECT *` doesn’t return values from virtual columns. +To select data from a virtual column, you must specify its name in the `SELECT` query. `SELECT *` does not return values from virtual columns. -If you create a table with a column that has the same name as one of the table virtual columns, the virtual column becomes inaccessible. We don’t recommend doing this. To help avoid conflicts, virtual column names are usually prefixed with an underscore. +If you create a table with a column that has the same name as one of the table virtual columns, the virtual column becomes inaccessible. We do not recommend doing this. To help avoid conflicts, virtual column names are usually prefixed with an underscore. 
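+
+To illustrate, here is a minimal sketch (the table in this example is hypothetical; `_part` is one of the virtual columns provided by `MergeTree`-family engines):
+
+``` sql
+CREATE TABLE visits (EventDate Date, UserID UInt64) ENGINE = MergeTree() ORDER BY UserID;
+
+-- The virtual column must be named explicitly; `SELECT *` returns only EventDate and UserID.
+SELECT _part, count() FROM visits GROUP BY _part;
+```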
[Original article](https://clickhouse.tech/docs/en/engines/table-engines/) diff --git a/docs/en/engines/table-engines/integrations/embedded-rocksdb.md b/docs/en/engines/table-engines/integrations/embedded-rocksdb.md index e9e069933e5..88c8973eeab 100644 --- a/docs/en/engines/table-engines/integrations/embedded-rocksdb.md +++ b/docs/en/engines/table-engines/integrations/embedded-rocksdb.md @@ -1,5 +1,5 @@ --- -toc_priority: 6 +toc_priority: 9 toc_title: EmbeddedRocksDB --- diff --git a/docs/en/engines/table-engines/integrations/hdfs.md b/docs/en/engines/table-engines/integrations/hdfs.md index 0782efe8e72..cf4bb5ecbf7 100644 --- a/docs/en/engines/table-engines/integrations/hdfs.md +++ b/docs/en/engines/table-engines/integrations/hdfs.md @@ -1,5 +1,5 @@ --- -toc_priority: 4 +toc_priority: 6 toc_title: HDFS --- diff --git a/docs/en/engines/table-engines/integrations/index.md b/docs/en/engines/table-engines/integrations/index.md index 28f38375448..eb1c5411e18 100644 --- a/docs/en/engines/table-engines/integrations/index.md +++ b/docs/en/engines/table-engines/integrations/index.md @@ -1,6 +1,6 @@ --- toc_folder_title: Integrations -toc_priority: 30 +toc_priority: 1 --- # Table Engines for Integrations {#table-engines-for-integrations} @@ -19,5 +19,3 @@ List of supported integrations: - [EmbeddedRocksDB](../../../engines/table-engines/integrations/embedded-rocksdb.md) - [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md) - [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md) - -[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/) diff --git a/docs/en/engines/table-engines/integrations/jdbc.md b/docs/en/engines/table-engines/integrations/jdbc.md index edbc5d3ed3e..82efb842ae7 100644 --- a/docs/en/engines/table-engines/integrations/jdbc.md +++ b/docs/en/engines/table-engines/integrations/jdbc.md @@ -1,5 +1,5 @@ --- -toc_priority: 2 +toc_priority: 3 toc_title: JDBC --- diff --git a/docs/en/engines/table-engines/integrations/kafka.md b/docs/en/engines/table-engines/integrations/kafka.md index 0ec50094a27..a3a13f9d152 100644 --- a/docs/en/engines/table-engines/integrations/kafka.md +++ b/docs/en/engines/table-engines/integrations/kafka.md @@ -1,5 +1,5 @@ --- -toc_priority: 5 +toc_priority: 8 toc_title: Kafka --- @@ -40,7 +40,7 @@ Required parameters: - `kafka_broker_list` — A comma-separated list of brokers (for example, `localhost:9092`). - `kafka_topic_list` — A list of Kafka topics. -- `kafka_group_name` — A group of Kafka consumers. Reading margins are tracked for each group separately. If you don’t want messages to be duplicated in the cluster, use the same group name everywhere. +- `kafka_group_name` — A group of Kafka consumers. Reading margins are tracked for each group separately. If you do not want messages to be duplicated in the cluster, use the same group name everywhere. - `kafka_format` — Message format. Uses the same notation as the SQL `FORMAT` function, such as `JSONEachRow`. For more information, see the [Formats](../../../interfaces/formats.md) section. 
Optional parameters:
diff --git a/docs/en/engines/table-engines/integrations/mongodb.md b/docs/en/engines/table-engines/integrations/mongodb.md
index 2fee27ce80d..a378ab03f55 100644
--- a/docs/en/engines/table-engines/integrations/mongodb.md
+++ b/docs/en/engines/table-engines/integrations/mongodb.md
@@ -1,5 +1,5 @@
---
-toc_priority: 7
+toc_priority: 5
 toc_title: MongoDB
---
diff --git a/docs/en/engines/table-engines/integrations/mysql.md b/docs/en/engines/table-engines/integrations/mysql.md
index 8b7caa12c91..9bd12e97dd8 100644
--- a/docs/en/engines/table-engines/integrations/mysql.md
+++ b/docs/en/engines/table-engines/integrations/mysql.md
@@ -1,5 +1,5 @@
---
-toc_priority: 3
+toc_priority: 4
 toc_title: MySQL
---

@@ -15,7 +15,12 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
    ...
-) ENGINE = MySQL('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']);
+) ENGINE = MySQL('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause'])
+SETTINGS
+    [connection_pool_size=16, ]
+    [connection_max_tries=3, ]
+    [connection_auto_close=true ]
+;
```

See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query.
diff --git a/docs/en/engines/table-engines/integrations/odbc.md b/docs/en/engines/table-engines/integrations/odbc.md
index 99efd870088..26bfb6aeb0d 100644
--- a/docs/en/engines/table-engines/integrations/odbc.md
+++ b/docs/en/engines/table-engines/integrations/odbc.md
@@ -1,5 +1,5 @@
---
-toc_priority: 1
+toc_priority: 2
 toc_title: ODBC
---
diff --git a/docs/en/engines/table-engines/integrations/postgresql.md b/docs/en/engines/table-engines/integrations/postgresql.md
index 8326038407f..4474b764d2e 100644
--- a/docs/en/engines/table-engines/integrations/postgresql.md
+++ b/docs/en/engines/table-engines/integrations/postgresql.md
@@ -1,11 +1,11 @@
---
-toc_priority: 8
+toc_priority: 11
 toc_title: PostgreSQL
---

# PostgreSQL {#postgresql}

-The PostgreSQL engine allows you to perform `SELECT` queries on data that is stored on a remote PostgreSQL server.
+The PostgreSQL engine allows you to perform `SELECT` and `INSERT` queries on data that is stored on a remote PostgreSQL server.

## Creating a Table {#creating-a-table}

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
    ...
-) ENGINE = PostgreSQL('host:port', 'database', 'table', 'user', 'password');
+) ENGINE = PostgreSQL('host:port', 'database', 'table', 'user', 'password'[, `schema`]);
```

See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query.

@@ -29,25 +29,51 @@ The table structure can differ from the original PostgreSQL table structure:

**Engine Parameters**

- `host:port` — PostgreSQL server address.
-
- `database` — Remote database name.
-
- `table` — Remote table name.
-
- `user` — PostgreSQL user.
-
- `password` — User password.
+- `schema` — Non-default table schema. Optional.

-SELECT Queries on PostgreSQL side run as `COPY (SELECT ...) TO STDOUT` inside read-only PostgreSQL transaction with commit after each `SELECT` query.
+## Implementation Details {#implementation-details}

-Simple `WHERE` clauses such as `=, !=, >, >=, <, <=, IN` are executed on the PostgreSQL server.

+`SELECT` queries on the PostgreSQL side run as `COPY (SELECT ...) TO STDOUT` inside a read-only PostgreSQL transaction, with a commit after each `SELECT` query.
+
+Simple `WHERE` clauses such as `=`, `!=`, `>`, `>=`, `<`, `<=`, and `IN` are executed on the PostgreSQL server.

All joins, aggregations, sorting, `IN [ array ]` conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to PostgreSQL finishes.

-INSERT Queries on PostgreSQL side run as `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` inside PostgreSQL transaction with auto-commit after each `INSERT` statement.
+`INSERT` queries on the PostgreSQL side run as `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` inside a PostgreSQL transaction, with auto-commit after each `INSERT` statement.

-PostgreSQL Array types converts into ClickHouse arrays.
-Be careful in PostgreSQL an array data created like a type_name[] may contain multi-dimensional arrays of different dimensions in different table rows in same column, but in ClickHouse it is only allowed to have multidimensional arrays of the same count of dimensions in all table rows in same column.
+PostgreSQL `Array` types are converted into ClickHouse arrays.
+
+!!! info "Note"
+    Be careful: in PostgreSQL, array data created as `type_name[]` may contain multi-dimensional arrays of different dimensions in different rows of the same column. In ClickHouse, all rows of a column may only contain multidimensional arrays with the same number of dimensions.
+
+Replica priority for the PostgreSQL dictionary source is supported. The bigger the number in the map, the lower the priority. The highest priority is `0`.
+
+In the example below, replica `example01-1` has the highest priority:
+
+```xml
+<postgresql>
+    <port>5432</port>
+    <user>clickhouse</user>
+    <password>qwerty</password>
+    <replica>
+        <host>example01-1</host>
+        <priority>1</priority>
+    </replica>
+    <replica>
+        <host>example01-2</host>
+        <priority>2</priority>
+    </replica>
+    <db>db_name</db>
+    <table>table_name</table>
+    <where>id=10</where>
+    <invalidate_query>SQL_QUERY</invalidate_query>
+</postgresql>
+```

## Usage Example {#usage-example}

@@ -64,14 +90,14 @@ PRIMARY KEY (int_id));

CREATE TABLE

-postgres=# insert into test (int_id, str, "float") VALUES (1,'test',2);
+postgres=# INSERT INTO test (int_id, str, "float") VALUES (1,'test',2);
INSERT 0 1

-postgresql> select * from test;
- int_id | int_nullable | float | str  | float_nullable
---------+--------------+-------+------+----------------
-      1 |              |     2 | test |
-(1 row)
+postgresql> SELECT * FROM test;
+ int_id | int_nullable | float | str  | float_nullable
+ --------+--------------+-------+------+----------------
+      1 |              |     2 | test |
+ (1 row)
```

Table in ClickHouse, retrieving data from the PostgreSQL table created above:

``` sql
CREATE TABLE default.postgresql_table
(
    `float_nullable` Nullable(Float32),
    `str` String,
    `int_id` Int32
)
ENGINE = PostgreSQL('localhost:5432', 'public', 'test', 'postgres_user', 'postgres_password');
```

``` sql
-SELECT * FROM postgresql_table WHERE str IN ('test')
+SELECT * FROM postgresql_table WHERE str IN ('test');
```

``` text
┌─float_nullable─┬─str──┬─int_id─┐
│ ᴺᵁᴸᴸ           │ test │      1 │
└────────────────┴──────┴────────┘
-1 rows in set. Elapsed: 0.019 sec.
```

+Using Non-default Schema:

-## See Also {#see-also}
+```text
+postgres=# CREATE SCHEMA "nice.schema";
-[The ‘postgresql’ table function](../../../sql-reference/table-functions/postgresql.md)
+postgres=# CREATE TABLE "nice.schema"."nice.table" (a integer);
+
+postgres=# INSERT INTO "nice.schema"."nice.table" SELECT i FROM generate_series(0, 99) as t(i)
+```
+
+```sql
+CREATE TABLE pg_table_schema_with_dots (a UInt32)
+    ENGINE = PostgreSQL('localhost:5432', 'clickhouse', 'nice.table', 'postgresql_user', 'password', 'nice.schema');
+```
+
+**See Also**
+
+- [The `postgresql` table function](../../../sql-reference/table-functions/postgresql.md)
- [Using PostgreSQL as a source of external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)

[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/postgresql/)
diff --git a/docs/en/engines/table-engines/integrations/rabbitmq.md b/docs/en/engines/table-engines/integrations/rabbitmq.md
index 476192d3969..5fb9ce5b151 100644
--- a/docs/en/engines/table-engines/integrations/rabbitmq.md
+++ b/docs/en/engines/table-engines/integrations/rabbitmq.md
@@ -1,5 +1,5 @@
---
-toc_priority: 6
+toc_priority: 10
 toc_title: RabbitMQ
---
diff --git a/docs/en/engines/table-engines/integrations/s3.md b/docs/en/engines/table-engines/integrations/s3.md
index 03340f2d8c9..a27308b9b3f 100644
--- a/docs/en/engines/table-engines/integrations/s3.md
+++ b/docs/en/engines/table-engines/integrations/s3.md
@@ -1,5 +1,5 @@
---
-toc_priority: 4
+toc_priority: 7
 toc_title: S3
---

@@ -11,34 +11,34 @@ This engine provides integration with [Amazon S3](https://aws.amazon.com/s3/) ec

``` sql
CREATE TABLE s3_engine_table (name String, value UInt32)
-ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, structure, [compression])
+ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression])
```

**Engine parameters**

- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
- `format` — The [format](../../../interfaces/formats.md#formats) of the file.
-- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
-- `compression` — Compression type. Supported values: none, gzip/gz, brotli/br, xz/LZMA, zstd/zst. Parameter is optional. By default, it will autodetect compression by file extension.
+- `aws_access_key_id`, `aws_secret_access_key` — Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are taken from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3).
+- `compression` — Compression type. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. Parameter is optional. By default, it will autodetect compression by file extension.

-**Example:**
+**Example**

-**1.** Set up the `s3_engine_table` table:
+1. Set up the `s3_engine_table` table:

-```sql
-CREATE TABLE s3_engine_table (name String, value UInt32) ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip')
+``` sql
+CREATE TABLE s3_engine_table (name String, value UInt32) ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip');
```

-**2.** Fill file:
+2. Fill file:

-```sql
-INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3)
+``` sql
+INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3);
```

-**3.** Query the data:
+3. Query the data:

-```sql
-SELECT * FROM s3_engine_table LIMIT 2
+``` sql
+SELECT * FROM s3_engine_table LIMIT 2;
```

```text

@@ -73,13 +73,63 @@ For more information about virtual columns see [here](../../../engines/table-eng

Constructions with `{}` are similar to the [remote](../../../sql-reference/table-functions/remote.md) table function.

-## S3-related Settings {#s3-settings}
+**Example**
+
+1. Suppose we have several files in CSV format with the following URIs on S3:
+
+- ‘https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv’
+- ‘https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv’
+- ‘https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_3.csv’
+- ‘https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_1.csv’
+- ‘https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_2.csv’
+- ‘https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_3.csv’
+
+There are several ways to make a table consisting of all six files:
+
+The first way:
+
+``` sql
+CREATE TABLE table_with_range (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}', 'CSV');
+```
+
+Another way:
+
+``` sql
+CREATE TABLE table_with_question_mark (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_?', 'CSV');
+```
+
+The table consists of all the files in both directories (all files should satisfy the format and schema described in the query):
+
+``` sql
+CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV');
+```
+
+If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
+
+**Example**
+
+Create a table with files named `file-000.csv`, `file-001.csv`, … , `file-999.csv`:
+
+``` sql
+CREATE TABLE big_table (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV');
+```
+
+## Virtual Columns {#virtual-columns}
+
+- `_path` — Path to the file.
+- `_file` — Name of the file.
+
+**See Also**
+
+- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
+
+## S3-related settings {#settings}

The following settings can be set before query execution or placed into configuration file.

-- `s3_max_single_part_upload_size` — The maximum size of object to upload using singlepart upload to S3. Default value is `64Mb`.
+- `s3_max_single_part_upload_size` — The maximum size of an object to upload using single-part upload to S3. Default value is `64Mb`.
- `s3_min_upload_part_size` — The minimum size of part to upload during multipart upload to [S3 Multipart upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). Default value is `512Mb`.
-- `s3_max_redirects` — Max number of S3 redirects hops allowed. Default value is `10`.
+- `s3_max_redirects` — Maximum number of S3 redirect hops allowed. Default value is `10`.

Security consideration: if a malicious user can specify arbitrary S3 URLs, `s3_max_redirects` must be set to zero to avoid [SSRF](https://en.wikipedia.org/wiki/Server-side_request_forgery) attacks; or alternatively, `remote_host_filter` must be specified in the server configuration.

@@ -89,7 +139,9 @@ The following settings can be specified in configuration file for given endpoint

- `endpoint` — Specifies prefix of an endpoint. Mandatory.
- `access_key_id` and `secret_access_key` — Specifies credentials to use with given endpoint. Optional.
-- `use_environment_credentials` — If set to `true`, S3 client will try to obtain credentials from environment variables and Amazon EC2 metadata for given endpoint. Optional, default value is `false`.
+- `use_environment_credentials` — If set to `true`, S3 client will try to obtain credentials from environment variables and [Amazon EC2](https://en.wikipedia.org/wiki/Amazon_Elastic_Compute_Cloud) metadata for given endpoint. Optional, default value is `false`.
+- `region` — Specifies S3 region name. Optional.
+- `use_insecure_imds_request` — If set to `true`, S3 client will use insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Optional, default value is `false`.
- `header` — Adds specified HTTP header to a request to given endpoint. Optional, can be specified multiple times.
- `server_side_encryption_customer_key_base64` — If specified, required headers for accessing S3 objects with SSE-C encryption will be set. Optional.
@@ -101,12 +153,15 @@ The following settings can be specified in configuration file for given endpoint
        <endpoint>https://storage.yandexcloud.net/my-test-bucket-768/</endpoint>
+        <!-- <region>REGION-NAME</region> -->
+        <!-- <use_insecure_imds_request>false</use_insecure_imds_request> -->
    </endpoint-name>
</s3>
```
+
## Usage {#usage-examples}

Suppose we have several files in TSV format with the following URIs on S3:

@@ -149,8 +204,7 @@ ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_p

CREATE TABLE big_table (name String, value UInt32)
ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV');
```
+
## See also

- [S3 table function](../../../sql-reference/table-functions/s3.md)
-
-[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/s3/)
diff --git a/docs/en/engines/table-engines/log-family/index.md b/docs/en/engines/table-engines/log-family/index.md
index 1f6d88c20e3..8cdde239f44 100644
--- a/docs/en/engines/table-engines/log-family/index.md
+++ b/docs/en/engines/table-engines/log-family/index.md
@@ -38,7 +38,7 @@ Engines:

## Differences {#differences}

-The `TinyLog` engine is the simplest in the family and provides the poorest functionality and lowest efficiency. The `TinyLog` engine doesn’t support parallel data reading by several threads in a single query. It reads data slower than other engines in the family that support parallel reading from a single query and it uses almost as many file descriptors as the `Log` engine because it stores each column in a separate file. Use it only in simple scenarios.
+The `TinyLog` engine is the simplest in the family and provides the poorest functionality and lowest efficiency. The `TinyLog` engine does not support parallel data reading by several threads in a single query. It reads data slower than other engines in the family that support parallel reading from a single query and it uses almost as many file descriptors as the `Log` engine because it stores each column in a separate file. Use it only in simple scenarios.

The `Log` and `StripeLog` engines support parallel data reading. When reading data, ClickHouse uses multiple threads. Each thread processes a separate data block. The `Log` engine uses a separate file for each column of the table. `StripeLog` stores all the data in one file. As a result, the `StripeLog` engine uses fewer file descriptors, but the `Log` engine provides higher efficiency when reading data.
diff --git a/docs/en/engines/table-engines/mergetree-family/aggregatingmergetree.md b/docs/en/engines/table-engines/mergetree-family/aggregatingmergetree.md
index 1a997b6b237..818830646cb 100644
--- a/docs/en/engines/table-engines/mergetree-family/aggregatingmergetree.md
+++ b/docs/en/engines/table-engines/mergetree-family/aggregatingmergetree.md
@@ -3,7 +3,7 @@ toc_priority: 35
 toc_title: AggregatingMergeTree
---

-# Aggregatingmergetree {#aggregatingmergetree}
+# AggregatingMergeTree {#aggregatingmergetree}

The engine inherits from [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md#table_engines-mergetree), altering the logic for data parts merging. ClickHouse replaces all rows with the same primary key (or more accurately, with the same [sorting key](../../../engines/table-engines/mergetree-family/mergetree.md)) with a single row (within one data part) that stores a combination of states of aggregate functions.
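+
+To make the description above concrete, here is a hedged sketch (the table and column names are invented for this illustration):
+
+``` sql
+CREATE TABLE agg_visits
+(
+    StartDate Date,
+    CounterID UInt32,
+    -- Stores an intermediate aggregation state instead of a plain number.
+    Visits AggregateFunction(sum, UInt32)
+)
+ENGINE = AggregatingMergeTree()
+ORDER BY (CounterID, StartDate);
+
+-- Write states with the -State combinator and read them back with -Merge.
+INSERT INTO agg_visits SELECT today(), 1, sumState(toUInt32(10));
+SELECT CounterID, sumMerge(Visits) AS visits FROM agg_visits GROUP BY CounterID;
+```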
diff --git a/docs/en/engines/table-engines/mergetree-family/collapsingmergetree.md b/docs/en/engines/table-engines/mergetree-family/collapsingmergetree.md
index ea0b265d652..4ec976eda30 100644
--- a/docs/en/engines/table-engines/mergetree-family/collapsingmergetree.md
+++ b/docs/en/engines/table-engines/mergetree-family/collapsingmergetree.md
@@ -126,7 +126,7 @@ Also when there are at least 2 more “state” rows than “cancel” rows, or

Thus, collapsing should not change the results of calculating statistics.
Changes are gradually collapsed so that in the end only the last state of almost every object is left.

-The `Sign` is required because the merging algorithm doesn’t guarantee that all of the rows with the same sorting key will be in the same resulting data part and even on the same physical server. ClickHouse process `SELECT` queries with multiple threads, and it can not predict the order of rows in the result. The aggregation is required if there is a need to get completely “collapsed” data from `CollapsingMergeTree` table.
+The `Sign` is required because the merging algorithm does not guarantee that all of the rows with the same sorting key will be in the same resulting data part, or even on the same physical server. ClickHouse processes `SELECT` queries with multiple threads, and it cannot predict the order of rows in the result. The aggregation is required if there is a need to get completely “collapsed” data from a `CollapsingMergeTree` table.

To finalize collapsing, write a query with `GROUP BY` clause and aggregate functions that account for the sign. For example, to calculate quantity, use `sum(Sign)` instead of `count()`. To calculate the sum of something, use `sum(Sign * x)` instead of `sum(x)`, and so on, and also add `HAVING sum(Sign) > 0`.
diff --git a/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md b/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md
index 855d5fdadf4..535922875ef 100644
--- a/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md
+++ b/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md
@@ -33,6 +33,8 @@ ORDER BY (CounterID, StartDate, intHash32(UserID));

In this example, we set partitioning by the event types that occurred during the current week.

+By default, the floating-point partition key is not supported. To use it, enable the setting [allow_floating_point_partition_key](../../../operations/settings/merge-tree-settings.md#allow_floating_point_partition_key).
+
When inserting new data to a table, this data is stored as a separate part (chunk) sorted by the primary key. In 10-15 minutes after inserting, the parts of the same partition are merged into the entire part.

!!! info "Info"
diff --git a/docs/en/engines/table-engines/mergetree-family/graphitemergetree.md b/docs/en/engines/table-engines/mergetree-family/graphitemergetree.md
index 14df9ae130e..3ead798503d 100644
--- a/docs/en/engines/table-engines/mergetree-family/graphitemergetree.md
+++ b/docs/en/engines/table-engines/mergetree-family/graphitemergetree.md
@@ -7,7 +7,7 @@ toc_title: GraphiteMergeTree

This engine is designed for thinning and aggregating/averaging (rollup) [Graphite](http://graphite.readthedocs.io/en/latest/index.html) data. It may be helpful to developers who want to use ClickHouse as a data store for Graphite.

-You can use any ClickHouse table engine to store the Graphite data if you don’t need rollup, but if you need a rollup use `GraphiteMergeTree`. The engine reduces the volume of storage and increases the efficiency of queries from Graphite.
+You can use any ClickHouse table engine to store the Graphite data if you do not need rollup, but if you need rollup, use `GraphiteMergeTree`. The engine reduces the volume of storage and increases the efficiency of queries from Graphite.

The engine inherits properties from [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md).
diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md
index a24b7229d17..e385b234cd8 100644
--- a/docs/en/engines/table-engines/mergetree-family/mergetree.md
+++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md
@@ -17,7 +17,7 @@ Main features:

- Partitions can be used if the [partitioning key](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md) is specified.

-    ClickHouse supports certain operations with partitions that are more effective than general operations on the same data with the same result. ClickHouse also automatically cuts off the partition data where the partitioning key is specified in the query. This also improves query performance.
+    ClickHouse supports certain operations with partitions that are more effective than general operations on the same data with the same result. ClickHouse also automatically cuts off the partition data where the partitioning key is specified in the query.

- Data replication support.

@@ -64,7 +64,7 @@ For a description of parameters, see the [CREATE query description](../../../sql

    ClickHouse uses the sorting key as a primary key if the primary key is not defined obviously by the `PRIMARY KEY` clause.

-    Use the `ORDER BY tuple()` syntax, if you don’t need sorting. See [Selecting the Primary Key](#selecting-the-primary-key).
+    Use the `ORDER BY tuple()` syntax if you do not need sorting. See [Selecting the Primary Key](#selecting-the-primary-key).

- `PARTITION BY` — The [partitioning key](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md). Optional.

@@ -162,7 +162,7 @@ Data parts can be stored in `Wide` or `Compact` format. In `Wide` format each co

The data storing format is controlled by the `min_bytes_for_wide_part` and `min_rows_for_wide_part` settings of the table engine. If the number of bytes or rows in a data part is less than the corresponding setting's value, the part is stored in `Compact` format. Otherwise it is stored in `Wide` format. If none of these settings is set, data parts are stored in `Wide` format.

-Each data part is logically divided into granules. A granule is the smallest indivisible data set that ClickHouse reads when selecting data. ClickHouse doesn’t split rows or values, so each granule always contains an integer number of rows. The first row of a granule is marked with the value of the primary key for the row. For each data part, ClickHouse creates an index file that stores the marks. For each column, whether it’s in the primary key or not, ClickHouse also stores the same marks. These marks let you find data directly in column files.
+Each data part is logically divided into granules. A granule is the smallest indivisible data set that ClickHouse reads when selecting data. ClickHouse does not split rows or values, so each granule always contains an integer number of rows. The first row of a granule is marked with the value of the primary key for the row. For each data part, ClickHouse creates an index file that stores the marks.
For each column, whether it’s in the primary key or not, ClickHouse also stores the same marks. These marks let you find data directly in column files.

The granule size is restricted by the `index_granularity` and `index_granularity_bytes` settings of the table engine. The number of rows in a granule lies in the `[1, index_granularity]` range, depending on the size of the rows. The size of a granule can exceed `index_granularity_bytes` if the size of a single row is greater than the value of the setting. In this case, the size of the granule equals the size of the row.

@@ -191,9 +191,7 @@ Sparse indexes allow you to work with a very large number of table rows, because

ClickHouse does not require a unique primary key. You can insert multiple rows with the same primary key.

-You can use `Nullable`-typed expressions in the `PRIMARY KEY` and `ORDER BY` clauses. To allow this feature, turn on the [allow_nullable_key](../../../operations/settings/settings.md#allow-nullable-key) setting.
-
-The [NULLS_LAST](../../../sql-reference/statements/select/order-by.md#sorting-of-special-values) principle applies for `NULL` values in the `ORDER BY` clause.
+You can use `Nullable`-typed expressions in the `PRIMARY KEY` and `ORDER BY` clauses, but it is strongly discouraged. To allow this feature, turn on the [allow_nullable_key](../../../operations/settings/settings.md#allow-nullable-key) setting. The [NULLS_LAST](../../../sql-reference/statements/select/order-by.md#sorting-of-special-values) principle applies for `NULL` values in the `ORDER BY` clause.

### Selecting the Primary Key {#selecting-the-primary-key}

@@ -229,7 +227,7 @@ This feature is helpful when using the [SummingMergeTree](../../../engines/table

In this case it makes sense to leave only a few columns in the primary key that will provide efficient range scans and add the remaining dimension columns to the sorting key tuple.

-[ALTER](../../../sql-reference/statements/alter/index.md) of the sorting key is a lightweight operation because when a new column is simultaneously added to the table and to the sorting key, existing data parts don’t need to be changed. Since the old sorting key is a prefix of the new sorting key and there is no data in the newly added column, the data is sorted by both the old and new sorting keys at the moment of table modification.
+[ALTER](../../../sql-reference/statements/alter/index.md) of the sorting key is a lightweight operation because when a new column is simultaneously added to the table and to the sorting key, existing data parts do not need to be changed. Since the old sorting key is a prefix of the new sorting key and there is no data in the newly added column, the data is sorted by both the old and new sorting keys at the moment of table modification.

### Use of Indexes and Partitions in Queries {#use-of-indexes-and-partitions-in-queries}

@@ -267,7 +265,7 @@ The key for partitioning by month allows reading only those data blocks which co

Consider, for example, the days of the month. They form a [monotonic sequence](https://en.wikipedia.org/wiki/Monotonic_function) for one month, but not monotonic for more extended periods. This is a partially-monotonic sequence. If a user creates the table with a partially-monotonic primary key, ClickHouse creates a sparse index as usual. When a user selects data from this kind of table, ClickHouse analyzes the query conditions.
If the user wants to get data between two marks of the index and both these marks fall within one month, ClickHouse can use the index in this particular case because it can calculate the distance between the parameters of a query and index marks.

-ClickHouse cannot use an index if the values of the primary key in the query parameter range don’t represent a monotonic sequence. In this case, ClickHouse uses the full scan method.
+ClickHouse cannot use an index if the values of the primary key in the query parameter range do not represent a monotonic sequence. In this case, ClickHouse uses the full scan method.

ClickHouse uses this logic not only for days of the month sequences, but for any primary key that represents a partially-monotonic sequence.

@@ -741,6 +739,7 @@ Configuration markup:
<endpoint>https://storage.yandexcloud.net/my-bucket/root-path/</endpoint>
<access_key_id>your_access_key_id</access_key_id>
<secret_access_key>your_secret_access_key</secret_access_key>
+<server_side_encryption_customer_key_base64>your_base64_encoded_customer_key</server_side_encryption_customer_key_base64>
<proxy>
    <uri>http://proxy1</uri>

@@ -766,7 +765,9 @@ Required parameters:

- `secret_access_key` — S3 secret access key.

Optional parameters:
+- `region` — S3 region name.
- `use_environment_credentials` — Reads AWS credentials from the Environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN if they exist. Default value is `false`.
+- `use_insecure_imds_request` — If set to `true`, S3 client will use insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Default value is `false`.
- `proxy` — Proxy configuration for S3 endpoint. Each `uri` element inside `proxy` block should contain a proxy URL.
- `connect_timeout_ms` — Socket connect timeout in milliseconds. Default value is `10 seconds`.
- `request_timeout_ms` — Request timeout in milliseconds. Default value is `5 seconds`.
diff --git a/docs/en/engines/table-engines/mergetree-family/replacingmergetree.md b/docs/en/engines/table-engines/mergetree-family/replacingmergetree.md
index b82bc65afc2..ca0db24e640 100644
--- a/docs/en/engines/table-engines/mergetree-family/replacingmergetree.md
+++ b/docs/en/engines/table-engines/mergetree-family/replacingmergetree.md
@@ -7,9 +7,9 @@ toc_title: ReplacingMergeTree

The engine differs from [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md#table_engines-mergetree) in that it removes duplicate entries with the same [sorting key](../../../engines/table-engines/mergetree-family/mergetree.md) value (`ORDER BY` table section, not `PRIMARY KEY`).

-Data deduplication occurs only during a merge. Merging occurs in the background at an unknown time, so you can’t plan for it. Some of the data may remain unprocessed. Although you can run an unscheduled merge using the `OPTIMIZE` query, don’t count on using it, because the `OPTIMIZE` query will read and write a large amount of data.
+Data deduplication occurs only during a merge. Merging occurs in the background at an unknown time, so you can’t plan for it. Some of the data may remain unprocessed. Although you can run an unscheduled merge using the `OPTIMIZE` query, do not count on using it, because the `OPTIMIZE` query will read and write a large amount of data.

-Thus, `ReplacingMergeTree` is suitable for clearing out duplicate data in the background in order to save space, but it doesn’t guarantee the absence of duplicates.
+Thus, `ReplacingMergeTree` is suitable for clearing out duplicate data in the background in order to save space, but it does not guarantee the absence of duplicates.
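+
+A small sketch of this behavior (names are illustrative; before a merge, a query may still see both rows):
+
+``` sql
+CREATE TABLE dedup_demo (key UInt64, value String) ENGINE = ReplacingMergeTree() ORDER BY key;
+
+INSERT INTO dedup_demo VALUES (1, 'first');
+INSERT INTO dedup_demo VALUES (1, 'second');
+
+-- May still return two rows until a background merge happens.
+SELECT * FROM dedup_demo;
+
+-- Forces an unscheduled merge; expensive on large tables, as noted above.
+OPTIMIZE TABLE dedup_demo FINAL;
+SELECT * FROM dedup_demo;
+```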
## Creating a Table {#creating-a-table}

@@ -34,7 +34,7 @@ For a description of request parameters, see [statement description](../../../sq

**ReplacingMergeTree Parameters**

-- `ver` — column with version. Type `UInt*`, `Date` or `DateTime`. Optional parameter.
+- `ver` — column with the version number. Type `UInt*`, `Date`, `DateTime` or `DateTime64`. Optional parameter.

When merging, `ReplacingMergeTree` leaves only one row from all the rows with the same sorting key:

@@ -66,5 +66,3 @@ All of the parameters excepting `ver` have the same meaning as in `MergeTree`.

- `ver` - column with the version. Optional parameter. For a description, see the text above.
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/replacingmergetree/)
diff --git a/docs/en/engines/table-engines/mergetree-family/replication.md b/docs/en/engines/table-engines/mergetree-family/replication.md
index ef34c8d3804..2db6686beb7 100644
--- a/docs/en/engines/table-engines/mergetree-family/replication.md
+++ b/docs/en/engines/table-engines/mergetree-family/replication.md
@@ -95,17 +95,19 @@ If ZooKeeper isn’t set in the config file, you can’t create replicated table

ZooKeeper is not used in `SELECT` queries because replication does not affect the performance of `SELECT` and queries run just as fast as they do for non-replicated tables. When querying distributed replicated tables, ClickHouse behavior is controlled by the settings [max_replica_delay_for_distributed_queries](../../../operations/settings/settings.md#settings-max_replica_delay_for_distributed_queries) and [fallback_to_stale_replicas_for_distributed_queries](../../../operations/settings/settings.md#settings-fallback_to_stale_replicas_for_distributed_queries).

-For each `INSERT` query, approximately ten entries are added to ZooKeeper through several transactions. (To be more precise, this is for each inserted block of data; an INSERT query contains one block or one block per `max_insert_block_size = 1048576` rows.) This leads to slightly longer latencies for `INSERT` compared to non-replicated tables. But if you follow the recommendations to insert data in batches of no more than one `INSERT` per second, it doesn’t create any problems. The entire ClickHouse cluster used for coordinating one ZooKeeper cluster has a total of several hundred `INSERTs` per second. The throughput on data inserts (the number of rows per second) is just as high as for non-replicated data.
+For each `INSERT` query, approximately ten entries are added to ZooKeeper through several transactions. (To be more precise, this is for each inserted block of data; an INSERT query contains one block or one block per `max_insert_block_size = 1048576` rows.) This leads to slightly longer latencies for `INSERT` compared to non-replicated tables. But if you follow the recommendations to insert data in batches of no more than one `INSERT` per second, it does not create any problems. The entire ClickHouse cluster used for coordinating one ZooKeeper cluster has a total of several hundred `INSERTs` per second. The throughput on data inserts (the number of rows per second) is just as high as for non-replicated data.

For very large clusters, you can use different ZooKeeper clusters for different shards. However, this hasn’t proven necessary on the Yandex.Metrica cluster (approximately 300 servers).

Replication is asynchronous and multi-master. `INSERT` queries (as well as `ALTER`) can be sent to any available server.
Data is inserted on the server where the query is run, and then it is copied to the other servers. Because it is asynchronous, recently inserted data appears on the other replicas with some latency. If part of the replicas are not available, the data is written when they become available. If a replica is available, the latency is the amount of time it takes to transfer the block of compressed data over the network. The number of threads performing background tasks for replicated tables can be set by the [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size) setting.

+The `ReplicatedMergeTree` engine uses a separate thread pool for replicated fetches. The size of the pool is limited by the [background_fetches_pool_size](../../../operations/settings/settings.md#background_fetches_pool_size) setting, which can be tuned with a server restart.
+
By default, an INSERT query waits for confirmation of writing the data from only one replica. If the data was successfully written to only one replica and the server with this replica ceases to exist, the stored data will be lost. To enable getting confirmation of data writes from multiple replicas, use the `insert_quorum` option.

Each block of data is written atomically. The INSERT query is divided into blocks up to `max_insert_block_size = 1048576` rows. In other words, if the `INSERT` query has less than 1048576 rows, it is made atomically.

-Data blocks are deduplicated. For multiple writes of the same data block (data blocks of the same size containing the same rows in the same order), the block is only written once. The reason for this is in case of network failures when the client application doesn’t know if the data was written to the DB, so the `INSERT` query can simply be repeated. It doesn’t matter which replica INSERTs were sent to with identical data. `INSERTs` are idempotent. Deduplication parameters are controlled by [merge_tree](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-merge_tree) server settings.
+Data blocks are deduplicated. For multiple writes of the same data block (data blocks of the same size containing the same rows in the same order), the block is only written once. The reason for this is that in case of network failures the client application does not know if the data was written to the DB, so the `INSERT` query can simply be repeated. It does not matter which replica INSERTs were sent to with identical data. `INSERTs` are idempotent. Deduplication parameters are controlled by [merge_tree](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-merge_tree) server settings.

During replication, only the source data to insert is transferred over the network. Further data transformation (merging) is coordinated and performed on all the replicas in the same way. This minimizes network usage, which means that replication works well when replicas reside in different datacenters. (Note that duplicating data in different datacenters is the main goal of replication.)

@@ -172,7 +174,7 @@ In this case, the path consists of the following parts:

`{layer}-{shard}` is the shard identifier. In this example it consists of two parts, since the Yandex.Metrica cluster uses bi-level sharding. For most tasks, you can leave just the {shard} substitution, which will be expanded to the shard identifier.

-`table_name` is the name of the node for the table in ZooKeeper. It is a good idea to make it the same as the table name. It is defined explicitly, because in contrast to the table name, it doesn’t change after a RENAME query.
It is defined explicitly, because in contrast to the table name, it doesn’t change after a RENAME query. +`table_name` is the name of the node for the table in ZooKeeper. It is a good idea to make it the same as the table name. It is defined explicitly, because in contrast to the table name, it does not change after a RENAME query. *HINT*: you could add a database name in front of `table_name` as well. E.g. `db_name.table_name` The two built-in substitutions `{database}` and `{table}` can be used; they expand into the database name and the table name respectively (unless these macros are defined in the `macros` section). So the zookeeper path can be specified as `'/clickhouse/tables/{layer}-{shard}/{database}/{table}'`. @@ -284,6 +286,7 @@ If the data in ZooKeeper was lost or damaged, you can save data by moving it to **See Also** - [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size) +- [background_fetches_pool_size](../../../operations/settings/settings.md#background_fetches_pool_size) - [execute_merges_on_single_replica_time_threshold](../../../operations/settings/settings.md#execute-merges-on-single-replica-time-threshold) [Original article](https://clickhouse.tech/docs/en/operations/table_engines/replication/) diff --git a/docs/en/engines/table-engines/mergetree-family/summingmergetree.md b/docs/en/engines/table-engines/mergetree-family/summingmergetree.md index 1f23e4daf51..9bfd1816d32 100644 --- a/docs/en/engines/table-engines/mergetree-family/summingmergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/summingmergetree.md @@ -7,7 +7,7 @@ toc_title: SummingMergeTree The engine inherits from [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md#table_engines-mergetree). The difference is that when merging data parts for `SummingMergeTree` tables ClickHouse replaces all the rows with the same primary key (or more accurately, with the same [sorting key](../../../engines/table-engines/mergetree-family/mergetree.md)) with one row which contains summarized values for the columns with the numeric data type. If the sorting key is composed in a way that a single key value corresponds to large number of rows, this significantly reduces storage volume and speeds up data selection. -We recommend to use the engine together with `MergeTree`. Store complete data in `MergeTree` table, and use `SummingMergeTree` for aggregated data storing, for example, when preparing reports. Such an approach will prevent you from losing valuable data due to an incorrectly composed primary key. +We recommend using the engine together with `MergeTree`. Store complete data in a `MergeTree` table, and use `SummingMergeTree` for storing aggregated data, for example, when preparing reports. Such an approach will prevent you from losing valuable data due to an incorrectly composed primary key. ## Creating a Table {#creating-a-table} diff --git a/docs/en/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md b/docs/en/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md index b23139b402b..93c35344e24 100644 --- a/docs/en/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md @@ -133,7 +133,7 @@ When ClickHouse inserts data, it orders rows by the primary key.
If the `Version ## Selecting Data {#selecting-data} -ClickHouse doesn’t guarantee that all of the rows with the same primary key will be in the same resulting data part or even on the same physical server. This is true both for writing the data and for subsequent merging of the data parts. In addition, ClickHouse processes `SELECT` queries with multiple threads, and it cannot predict the order of rows in the result. This means that aggregation is required if there is a need to get completely “collapsed” data from a `VersionedCollapsingMergeTree` table. +ClickHouse does not guarantee that all of the rows with the same primary key will be in the same resulting data part or even on the same physical server. This is true both for writing the data and for subsequent merging of the data parts. In addition, ClickHouse processes `SELECT` queries with multiple threads, and it cannot predict the order of rows in the result. This means that aggregation is required if there is a need to get completely “collapsed” data from a `VersionedCollapsingMergeTree` table. To finalize collapsing, write a query with a `GROUP BY` clause and aggregate functions that account for the sign. For example, to calculate quantity, use `sum(Sign)` instead of `count()`. To calculate the sum of something, use `sum(Sign * x)` instead of `sum(x)`, and add `HAVING sum(Sign) > 0`. @@ -219,7 +219,7 @@ HAVING sum(Sign) > 0 └─────────────────────┴───────────┴──────────┴─────────┘ ``` -If we don’t need aggregation and want to force collapsing, we can use the `FINAL` modifier for the `FROM` clause. +If we do not need aggregation and want to force collapsing, we can use the `FINAL` modifier for the `FROM` clause. ``` sql SELECT * FROM UAct FINAL diff --git a/docs/en/engines/table-engines/special/buffer.md b/docs/en/engines/table-engines/special/buffer.md index bf6c08f8f6c..cacb310a15c 100644 --- a/docs/en/engines/table-engines/special/buffer.md +++ b/docs/en/engines/table-engines/special/buffer.md @@ -18,11 +18,17 @@ Engine parameters: - `num_layers` – Parallelism layer. Physically, the table will be represented as `num_layers` of independent buffers. Recommended value: 16. - `min_time`, `max_time`, `min_rows`, `max_rows`, `min_bytes`, and `max_bytes` – Conditions for flushing data from the buffer. +Optional engine parameters: + +- `flush_time`, `flush_rows`, `flush_bytes` – Conditions for flushing data from the buffer that are checked only in the background (omitted or zero disables the corresponding `flush*` condition). + Data is flushed from the buffer and written to the destination table if all the `min*` conditions or at least one `max*` condition are met. -- `min_time`, `max_time` – Condition for the time in seconds from the moment of the first write to the buffer. -- `min_rows`, `max_rows` – Condition for the number of rows in the buffer. -- `min_bytes`, `max_bytes` – Condition for the number of bytes in the buffer. +Also, if at least one `flush*` condition is met, a flush is initiated in the background. This is different from `max*`, since `flush*` allows you to configure background flushes separately, to avoid adding latency for `INSERT` (into `Buffer`) queries. + +- `min_time`, `max_time`, `flush_time` – Condition for the time in seconds from the moment of the first write to the buffer. +- `min_rows`, `max_rows`, `flush_rows` – Condition for the number of rows in the buffer. +- `min_bytes`, `max_bytes`, `flush_bytes` – Condition for the number of bytes in the buffer. During the write operation, data is inserted to a `num_layers` number of random buffers.
Or, if the data part to insert is large enough (greater than `max_rows` or `max_bytes`), it is written directly to the destination table, omitting the buffer. @@ -43,12 +49,12 @@ You can set empty strings in single quotation marks for the database and table n When reading from a Buffer table, data is processed both from the buffer and from the destination table (if there is one). Note that Buffer tables do not support an index. In other words, data in the buffer is fully scanned, which might be slow for large buffers. (For data in a subordinate table, the index that it supports will be used.) -If the set of columns in the Buffer table doesn’t match the set of columns in a subordinate table, a subset of columns that exist in both tables is inserted. +If the set of columns in the Buffer table does not match the set of columns in a subordinate table, a subset of columns that exist in both tables is inserted. -If the types don’t match for one of the columns in the Buffer table and a subordinate table, an error message is entered in the server log and the buffer is cleared. -The same thing happens if the subordinate table doesn’t exist when the buffer is flushed. +If the types do not match for one of the columns in the Buffer table and a subordinate table, an error message is entered in the server log, and the buffer is cleared. +The same thing happens if the subordinate table does not exist when the buffer is flushed. -If you need to run ALTER for a subordinate table and the Buffer table, we recommend first deleting the Buffer table, running ALTER for the subordinate table, then creating the Buffer table again. +If you need to run ALTER for a subordinate table and the Buffer table, we recommend first deleting the Buffer table, running ALTER for the subordinate table, and then creating the Buffer table again. If the server is restarted abnormally, the data in the buffer is lost. @@ -64,6 +70,6 @@ Due to these disadvantages, we can only recommend using a Buffer table in rare c A Buffer table is used when too many INSERTs are received from a large number of servers over a unit of time and data can’t be buffered before insertion, which means the INSERTs can’t run fast enough. -Note that it doesn’t make sense to insert data one row at a time, even for Buffer tables. This will only produce a speed of a few thousand rows per second, while inserting larger blocks of data can produce over a million rows per second (see the section “Performance”). +Note that it does not make sense to insert data one row at a time, even for Buffer tables. This will only produce a speed of a few thousand rows per second, while inserting larger blocks of data can produce over a million rows per second (see the section “Performance”).
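+For illustration, here is a sketch of how these parameters fit together (the `merge.hits` destination table and the specific thresholds are assumptions for the example, not recommendations):
+
+``` sql
+-- Buffer(database, table, num_layers, min_time, max_time, min_rows, max_rows, min_bytes, max_bytes)
+-- Data is flushed to merge.hits when all min* conditions or at least one max* condition are met.
+CREATE TABLE merge.hits_buffer AS merge.hits
+ENGINE = Buffer(merge, hits, 16, 10, 100, 10000, 1000000, 10000000, 100000000)
+```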
[Original article](https://clickhouse.tech/docs/en/operations/table_engines/buffer/) diff --git a/docs/en/engines/table-engines/special/distributed.md b/docs/en/engines/table-engines/special/distributed.md index c47e0c27cd2..6de6602a216 100644 --- a/docs/en/engines/table-engines/special/distributed.md +++ b/docs/en/engines/table-engines/special/distributed.md @@ -25,7 +25,7 @@ The Distributed engine accepts parameters: - [insert_distributed_sync](../../../operations/settings/settings.md#insert_distributed_sync) setting - [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes) for the examples -Also it accept the following settings: +Also, it accepts the following settings: - `fsync_after_insert` - do the `fsync` for the file data after asynchronous insert to Distributed. Guarantees that the OS flushed the whole inserted data to a file **on the initiator node** disk. @@ -124,7 +124,7 @@ Replicas are duplicating servers (in order to read all the data, you can access Cluster names must not contain dots. The parameters `host`, `port`, and optionally `user`, `password`, `secure`, `compression` are specified for each server: -- `host` – The address of the remote server. You can use either the domain or the IPv4 or IPv6 address. If you specify the domain, the server makes a DNS request when it starts, and the result is stored as long as the server is running. If the DNS request fails, the server doesn’t start. If you change the DNS record, restart the server. +- `host` – The address of the remote server. You can use either the domain or the IPv4 or IPv6 address. If you specify the domain, the server makes a DNS request when it starts, and the result is stored as long as the server is running. If the DNS request fails, the server does not start. If you change the DNS record, restart the server. - `port` – The TCP port for messenger activity (`tcp_port` in the config, usually set to 9000). Do not confuse it with http_port. - `user` – Name of the user for connecting to a remote server. Default value: default. This user must have access to connect to the specified server. Access is configured in the users.xml file. For more information, see the section [Access rights](../../../operations/access-rights.md). - `password` – The password for connecting to a remote server (not masked). Default value: empty string. @@ -143,13 +143,13 @@ To view your clusters, use the `system.clusters` table. The Distributed engine allows working with a cluster like a local server. However, the cluster is inextensible: you must write its configuration in the server config file (even better, for all the cluster’s servers). -The Distributed engine requires writing clusters to the config file. Clusters from the config file are updated on the fly, without restarting the server. If you need to send a query to an unknown set of shards and replicas each time, you don’t need to create a Distributed table – use the `remote` table function instead. See the section [Table functions](../../../sql-reference/table-functions/index.md). +The Distributed engine requires writing clusters to the config file. Clusters from the config file are updated on the fly, without restarting the server. If you need to send a query to an unknown set of shards and replicas each time, you do not need to create a Distributed table – use the `remote` table function instead. See the section [Table functions](../../../sql-reference/table-functions/index.md). 
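+As a sketch of that alternative (the host pattern and the `default.hits` table are hypothetical), an ad-hoc query over a set of servers might look like this:
+
+``` sql
+-- The {1,2} pattern expands into two addresses, so both servers are queried without any cluster configuration.
+SELECT count() FROM remote('server0{1,2}.example.com', default.hits)
+```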
There are two methods for writing data to a cluster: First, you can define which servers to write which data to and perform the write directly on each shard. In other words, perform INSERT in the tables that the distributed table “looks at”. This is the most flexible solution as you can use any sharding scheme, which could be non-trivial due to the requirements of the subject area. This is also the most optimal solution since data can be written to different shards completely independently. -Second, you can perform INSERT in a Distributed table. In this case, the table will distribute the inserted data across the servers itself. In order to write to a Distributed table, it must have a sharding key set (the last parameter). In addition, if there is only one shard, the write operation works without specifying the sharding key, since it doesn’t mean anything in this case. +Second, you can perform INSERT in a Distributed table. In this case, the table will distribute the inserted data across the servers itself. In order to write to a Distributed table, it must have a sharding key set (the last parameter). In addition, if there is only one shard, the write operation works without specifying the sharding key, since it does not mean anything in this case. Each shard can have a weight defined in the config file. By default, the weight is equal to one. Data is distributed across shards in the amount proportional to the shard weight. For example, if there are two shards and the first has a weight of 9 while the second has a weight of 10, the first will be sent 9 / 19 parts of the rows, and the second will be sent 10 / 19. @@ -165,7 +165,7 @@ The sharding expression can be any expression from constants and table columns t A simple remainder from the division is a limited solution for sharding and isn’t always appropriate. It works for medium and large volumes of data (dozens of servers), but not for very large volumes of data (hundreds of servers or more). In the latter case, use the sharding scheme required by the subject area, rather than using entries in Distributed tables. -SELECT queries are sent to all the shards and work regardless of how data is distributed across the shards (they can be distributed completely randomly). When you add a new shard, you don’t have to transfer the old data to it. You can write new data with a heavier weight – the data will be distributed slightly unevenly, but queries will work correctly and efficiently. +SELECT queries are sent to all the shards and work regardless of how data is distributed across the shards (they can be distributed completely randomly). When you add a new shard, you do not have to transfer the old data to it. You can write new data with a heavier weight – the data will be distributed slightly unevenly, but queries will work correctly and efficiently. You should be concerned about the sharding scheme in the following cases: diff --git a/docs/en/engines/table-engines/special/external-data.md b/docs/en/engines/table-engines/special/external-data.md index 88d76b3805e..b5429ccad12 100644 --- a/docs/en/engines/table-engines/special/external-data.md +++ b/docs/en/engines/table-engines/special/external-data.md @@ -9,7 +9,7 @@ ClickHouse allows sending a server the data that is needed for processing a quer For example, if you have a text file with important user identifiers, you can upload it to the server along with a query that uses filtration by this list.
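+For example, if such a file were uploaded as an external temporary table named `ids` (the table name and the `hits` table are hypothetical), the query could filter by it directly:
+
+``` sql
+-- `ids` exists only for the duration of this single query.
+SELECT count() FROM hits WHERE UserID IN (SELECT * FROM ids)
+```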
-If you need to run more than one query with a large volume of external data, don’t use this feature. It is better to upload the data to the DB ahead of time. +If you need to run more than one query with a large volume of external data, do not use this feature. It is better to upload the data to the DB ahead of time. External data can be uploaded using the command-line client (in non-interactive mode), or using the HTTP interface. diff --git a/docs/en/engines/table-engines/special/file.md b/docs/en/engines/table-engines/special/file.md index 2acec40ef02..17eef2b4941 100644 --- a/docs/en/engines/table-engines/special/file.md +++ b/docs/en/engines/table-engines/special/file.md @@ -24,7 +24,7 @@ The `Format` parameter specifies one of the available file formats. To perform `INSERT` queries – for output. The available formats are listed in the [Formats](../../../interfaces/formats.md#formats) section. -ClickHouse does not allow to specify filesystem path for`File`. It will use folder defined by [path](../../../operations/server-configuration-parameters/settings.md) setting in server configuration. +ClickHouse does not allow specifying the filesystem path for `File`. It will use the folder defined by the [path](../../../operations/server-configuration-parameters/settings.md) setting in the server configuration. When creating table using `File(Format)` it creates empty subdirectory in that folder. When data is written to that table, it’s put into `data.Format` file in that subdirectory. diff --git a/docs/en/engines/table-engines/special/join.md b/docs/en/engines/table-engines/special/join.md index 30dbec73939..4cd1c741352 100644 --- a/docs/en/engines/table-engines/special/join.md +++ b/docs/en/engines/table-engines/special/join.md @@ -28,7 +28,7 @@ See the detailed description of the [CREATE TABLE](../../../sql-reference/statem - `join_type` – [JOIN type](../../../sql-reference/statements/select/join.md#select-join-types). - `k1[, k2, ...]` – Key columns from the `USING` clause that the `JOIN` operation is made with. -Enter `join_strictness` and `join_type` parameters without quotes, for example, `Join(ANY, LEFT, col1)`. They must match the `JOIN` operation that the table will be used for. If the parameters don’t match, ClickHouse doesn’t throw an exception and may return incorrect data. +Enter `join_strictness` and `join_type` parameters without quotes, for example, `Join(ANY, LEFT, col1)`. They must match the `JOIN` operation that the table will be used for. If the parameters do not match, ClickHouse does not throw an exception and may return incorrect data. ## Table Usage {#table-usage} diff --git a/docs/en/engines/table-engines/special/memory.md b/docs/en/engines/table-engines/special/memory.md index a6c833ebdba..b6402c2030b 100644 --- a/docs/en/engines/table-engines/special/memory.md +++ b/docs/en/engines/table-engines/special/memory.md @@ -6,7 +6,7 @@ toc_title: Memory # Memory Table Engine {#memory} The Memory engine stores data in RAM, in uncompressed form. Data is stored in exactly the same form as it is received when read. In other words, reading from this table is completely free. -Concurrent data access is synchronized. Locks are short: read and write operations don’t block each other. +Concurrent data access is synchronized. Locks are short: read and write operations do not block each other. Indexes are not supported. Reading is parallelized. Maximal productivity (over 10 GB/sec) is reached on simple queries, because there is no reading from the disk, decompressing, or deserializing data.
(We should note that in many cases, the productivity of the MergeTree engine is almost as high.) diff --git a/docs/en/faq/general/columnar-database.md b/docs/en/faq/general/columnar-database.md index 1c6a2bc2989..e30b4a94a87 100644 --- a/docs/en/faq/general/columnar-database.md +++ b/docs/en/faq/general/columnar-database.md @@ -22,4 +22,4 @@ Here is the illustration of the difference between traditional row-oriented syst **Columnar** ![Columnar](https://clickhouse.tech/docs/en/images/column-oriented.gif#) -A columnar database is a preferred choice for analytical applications because it allows to have many columns in a table just in case, but don’t pay the cost for unused columns on read query execution time. Column-oriented databases are designed for big data processing because and data warehousing, they often natively scale using distributed clusters of low-cost hardware to increase throughput. ClickHouse does it with combination of [distributed](../../engines/table-engines/special/distributed.md) and [replicated](../../engines/table-engines/mergetree-family/replication.md) tables. +A columnar database is a preferred choice for analytical applications because it allows having many columns in a table just in case, without paying the cost for unused columns at read query execution time. Column-oriented databases are designed for big data processing and data warehousing; they often natively scale using distributed clusters of low-cost hardware to increase throughput. ClickHouse does it with a combination of [distributed](../../engines/table-engines/special/distributed.md) and [replicated](../../engines/table-engines/mergetree-family/replication.md) tables. diff --git a/docs/en/faq/general/dbms-naming.md b/docs/en/faq/general/dbms-naming.md index 88a66659ab3..d4e87ff450a 100644 --- a/docs/en/faq/general/dbms-naming.md +++ b/docs/en/faq/general/dbms-naming.md @@ -6,7 +6,7 @@ toc_priority: 10 # What Does “ClickHouse” Mean? {#what-does-clickhouse-mean} -It’s a combination of “**Click**stream” and “Data ware**House**”. It comes from the original use case at Yandex.Metrica, where ClickHouse was supposed to keep records of all clicks by people from all over the Internet and it still does the job. You can read more about this use case on [ClickHouse history](../../introduction/history.md) page. +It’s a combination of “**Click**stream” and “Data ware**House**”. It comes from the original use case at Yandex.Metrica, where ClickHouse was supposed to keep records of all clicks by people from all over the Internet, and it still does the job. You can read more about this use case on the [ClickHouse history](../../introduction/history.md) page. This two-part meaning has two consequences: diff --git a/docs/en/faq/general/ne-tormozit.md b/docs/en/faq/general/ne-tormozit.md index 44fe686d670..17c5479fa6d 100644 --- a/docs/en/faq/general/ne-tormozit.md +++ b/docs/en/faq/general/ne-tormozit.md @@ -15,9 +15,9 @@ One of the following batches of those t-shirts was supposed to be given away on So, what does it mean? Here are some ways to translate *“не тормозит”*: -- If you translate it literally, it’d be something like *“ClickHouse doesn’t press the brake pedal”*. +- If you translate it literally, it’d be something like *“ClickHouse does not press the brake pedal”*. - If you’d want to express it as close to how it sounds to a Russian person with IT background, it’d be something like *“If your larger system lags, it’s not because it uses ClickHouse”*.
-- Shorter, but not so precise versions could be *“ClickHouse is not slow”*, *“ClickHouse doesn’t lag”* or just *“ClickHouse is fast”*. +- Shorter, but not so precise versions could be *“ClickHouse is not slow”*, *“ClickHouse does not lag”* or just *“ClickHouse is fast”*. If you haven’t seen one of those t-shirts in person, you can check them out online in many ClickHouse-related videos. For example, this one: diff --git a/docs/en/faq/general/olap.md b/docs/en/faq/general/olap.md index f023b8c3524..1f6df183f8c 100644 --- a/docs/en/faq/general/olap.md +++ b/docs/en/faq/general/olap.md @@ -31,7 +31,7 @@ All database management systems could be classified into two groups: OLAP (Onlin In practice OLAP and OLTP are not categories, it’s more like a spectrum. Most real systems usually focus on one of them but provide some solutions or workarounds if the opposite kind of workload is also desired. This situation often forces businesses to operate multiple storage systems integrated, which might be not so big deal but having more systems make it more expensive to maintain. So the trend of recent years is HTAP (**Hybrid Transactional/Analytical Processing**) when both kinds of the workload are handled equally well by a single database management system. -Even if a DBMS started as a pure OLAP or pure OLTP, they are forced to move towards that HTAP direction to keep up with their competition. And ClickHouse is no exception, initially, it has been designed as [fast-as-possible OLAP system](../../faq/general/why-clickhouse-is-so-fast.md) and it still doesn’t have full-fledged transaction support, but some features like consistent read/writes and mutations for updating/deleting data had to be added. +Even if a DBMS started as a pure OLAP or pure OLTP system, it is forced to move towards that HTAP direction to keep up with the competition. ClickHouse is no exception: initially, it was designed as a [fast-as-possible OLAP system](../../faq/general/why-clickhouse-is-so-fast.md), and it still does not have full-fledged transaction support, but some features like consistent reads/writes and mutations for updating/deleting data had to be added. The fundamental trade-off between OLAP and OLTP systems remains: diff --git a/docs/en/faq/general/who-is-using-clickhouse.md b/docs/en/faq/general/who-is-using-clickhouse.md index 2ae07507123..b7ff867d726 100644 --- a/docs/en/faq/general/who-is-using-clickhouse.md +++ b/docs/en/faq/general/who-is-using-clickhouse.md @@ -6,9 +6,9 @@ toc_priority: 9 # Who Is Using ClickHouse? {#who-is-using-clickhouse} -Being an open-source product makes this question not so straightforward to answer. You don’t have to tell anyone if you want to start using ClickHouse, you just go grab source code or pre-compiled packages. There’s no contract to sign and the [Apache 2.0 license](https://github.com/ClickHouse/ClickHouse/blob/master/LICENSE) allows for unconstrained software distribution. +Being an open-source product makes this question not so straightforward to answer. You do not have to tell anyone if you want to start using ClickHouse; you just grab the source code or pre-compiled packages. There’s no contract to sign, and the [Apache 2.0 license](https://github.com/ClickHouse/ClickHouse/blob/master/LICENSE) allows for unconstrained software distribution. -Also, the technology stack is often in a grey zone of what’s covered by an NDA.
Some companies consider technologies they use as a competitive advantage even if they are open-source and don’t allow employees to share any details publicly. Some see some PR risks and allow employees to share implementation details only with their PR department approval. +Also, the technology stack is often in a grey zone of what’s covered by an NDA. Some companies consider technologies they use as a competitive advantage even if they are open-source and do not allow employees to share any details publicly. Others see PR risks and allow employees to share implementation details only with their PR department’s approval. So how to tell who is using ClickHouse? diff --git a/docs/en/faq/index.md b/docs/en/faq/index.md index 1ae71cf680c..1e9c3b8ae64 100644 --- a/docs/en/faq/index.md +++ b/docs/en/faq/index.md @@ -39,7 +39,7 @@ Question candidates: - How to kill a process (query) in ClickHouse? - How to implement pivot (like in pandas)? - How to remove the default ClickHouse user through users.d? -- Importing MySQL dump to Clickhouse +- Importing MySQL dump to ClickHouse - Window function workarounds (row_number, lag/lead, running diff/sum/average) ##} diff --git a/docs/en/faq/operations/delete-old-data.md b/docs/en/faq/operations/delete-old-data.md index fdf1f1f290e..32fc485e98a 100644 --- a/docs/en/faq/operations/delete-old-data.md +++ b/docs/en/faq/operations/delete-old-data.md @@ -12,7 +12,7 @@ The short answer is “yes”. ClickHouse has multiple mechanisms that allow fre ClickHouse allows to automatically drop values when some condition happens. This condition is configured as an expression based on any columns, usually just static offset for any timestamp column. -The key advantage of this approach is that it doesn’t need any external system to trigger, once TTL is configured, data removal happens automatically in background. +The key advantage of this approach is that it does not need any external system to trigger it: once TTL is configured, data removal happens automatically in the background. !!! note "Note" TTL can also be used to move data not only to [/dev/null](https://en.wikipedia.org/wiki/Null_device), but also between different storage systems, like from SSD to HDD. @@ -21,7 +21,7 @@ More details on [configuring TTL](../../engines/table-engines/mergetree-family/m ## ALTER DELETE {#alter-delete} -ClickHouse doesn’t have real-time point deletes like in [OLTP](https://en.wikipedia.org/wiki/Online_transaction_processing) databases. The closest thing to them are mutations. They are issued as `ALTER ... DELETE` or `ALTER ... UPDATE` queries to distinguish from normal `DELETE` or `UPDATE` as they are asynchronous batch operations, not immediate modifications. The rest of syntax after `ALTER TABLE` prefix is similar. +ClickHouse does not have real-time point deletes like in [OLTP](https://en.wikipedia.org/wiki/Online_transaction_processing) databases. The closest thing to them are mutations. They are issued as `ALTER ... DELETE` or `ALTER ... UPDATE` queries to distinguish from normal `DELETE` or `UPDATE` as they are asynchronous batch operations, not immediate modifications. The rest of syntax after `ALTER TABLE` prefix is similar. `ALTER DELETE` can be issued to flexibly remove old data. If you need to do it regularly, the main downside will be the need to have an external system to submit the query. There are also some performance considerations, since mutations rewrite complete parts even if there’s only a single row to be deleted.
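+A minimal sketch of such a periodic cleanup (the `events` table and `EventDate` column are hypothetical):
+
+``` sql
+-- Asynchronous mutation: affected parts are rewritten in the background,
+-- so do not expect the matching rows to disappear immediately.
+ALTER TABLE events DELETE WHERE EventDate < today() - 30;
+```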
diff --git a/docs/en/faq/operations/production.md b/docs/en/faq/operations/production.md index 77f7a76f2f9..52ca300ced0 100644 --- a/docs/en/faq/operations/production.md +++ b/docs/en/faq/operations/production.md @@ -25,7 +25,7 @@ Here’re some key points to get reasonable fidelity in a pre-production environ - Don’t make it read-only with some frozen data. - Don’t make it write-only with just copying data without building some typical reports. - Don’t wipe it clean instead of applying schema migrations. -- Use a sample of real production data and queries. Try to choose a sample that’s still representative and makes `SELECT` queries return reasonable results. Use obfuscation if your data is sensitive and internal policies don’t allow it to leave the production environment. +- Use a sample of real production data and queries. Try to choose a sample that’s still representative and makes `SELECT` queries return reasonable results. Use obfuscation if your data is sensitive and internal policies do not allow it to leave the production environment. - Make sure that pre-production is covered by your monitoring and alerting software the same way as your production environment is. - If your production spans across multiple datacenters or regions, make sure your pre-production does the same. - If your production uses complex features like replication, distributed table, cascading materialize views, make sure they are configured similarly in pre-production. @@ -61,8 +61,8 @@ For production use, there are two key options: `stable` and `lts`. Here is some - `stable` is the kind of package we recommend by default. They are released roughly monthly (and thus provide new features with reasonable delay) and three latest stable releases are supported in terms of diagnostics and backporting of bugfixes. - `lts` are released twice a year and are supported for a year after their initial release. You might prefer them over `stable` in the following cases: - - Your company has some internal policies that don’t allow for frequent upgrades or using non-LTS software. - - You are using ClickHouse in some secondary products that either doesn’t require any complex ClickHouse features and don’t have enough resources to keep it updated. + - Your company has some internal policies that do not allow for frequent upgrades or using non-LTS software. + - You are using ClickHouse in some secondary products that either do not require any complex ClickHouse features or do not have enough resources to keep it updated. Many teams who initially thought that `lts` is the way to go, often switch to `stable` anyway because of some recent feature that’s important for their product. diff --git a/docs/en/getting-started/example-datasets/cell-towers.md b/docs/en/getting-started/example-datasets/cell-towers.md index 76effdd4c62..7028b650ad1 100644 --- a/docs/en/getting-started/example-datasets/cell-towers.md +++ b/docs/en/getting-started/example-datasets/cell-towers.md @@ -3,31 +3,31 @@ toc_priority: 21 toc_title: Cell Towers --- -# Cell Towers +# Cell Towers {#cell-towers} This dataset is from [OpenCellid](https://www.opencellid.org/) - The world's largest Open Database of Cell Towers. -As of 2021 it contains more than 40 million records about cell towers (GSM, LTE, UMTS, etc.) around the world with their geographical coordinates and metadata (country code, network, etc). +As of 2021, it contains more than 40 million records about cell towers (GSM, LTE, UMTS, etc.)
around the world with their geographical coordinates and metadata (country code, network, etc). -OpenCelliD Project is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License, and we redistribute a snapshot of this dataset under the terms of the same license. The up to date version of the dataset is available to download after sign in. +OpenCelliD Project is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License, and we redistribute a snapshot of this dataset under the terms of the same license. The up-to-date version of the dataset is available to download after signing in. -## Get the Dataset +## Get the Dataset {#get-the-dataset} -Download the snapshot of the dataset from Feb 2021: [https://datasets.clickhouse.tech/cell_towers.csv.xz] (729 MB). +1. Download the snapshot of the dataset from February 2021: [https://datasets.clickhouse.tech/cell_towers.csv.xz](https://datasets.clickhouse.tech/cell_towers.csv.xz) (729 MB). -Optionally validate the integrity: +2. Validate the integrity (optional step): ``` md5sum cell_towers.csv.xz 8cf986f4a0d9f12c6f384a0e9192c908 cell_towers.csv.xz ``` -Decompress it with the following command: +3. Decompress it with the following command: ``` xz -d cell_towers.csv.xz ``` -Create a table: +4. Create a table: ``` CREATE TABLE cell_towers @@ -50,15 +50,15 @@ CREATE TABLE cell_towers ENGINE = MergeTree ORDER BY (radio, mcc, net, created); ``` -Insert the dataset: +5. Insert the dataset: ``` clickhouse-client --query "INSERT INTO cell_towers FORMAT CSVWithNames" < cell_towers.csv ``` +## Examples {#examples} -## Run some queries +1. The number of cell towers by type: -Number of cell towers by type: ``` SELECT radio, count() AS c FROM cell_towers GROUP BY radio ORDER BY c DESC @@ -73,7 +73,8 @@ SELECT radio, count() AS c FROM cell_towers GROUP BY radio ORDER BY c DESC 5 rows in set. Elapsed: 0.011 sec. Processed 43.28 million rows, 43.28 MB (3.83 billion rows/s., 3.83 GB/s.) ``` -Cell towers by mobile country code (MCC): +2. Cell towers by [mobile country code (MCC)](https://en.wikipedia.org/wiki/Mobile_country_code): + ``` SELECT mcc, count() FROM cell_towers GROUP BY mcc ORDER BY count() DESC LIMIT 10 @@ -93,28 +94,28 @@ SELECT mcc, count() FROM cell_towers GROUP BY mcc ORDER BY count() DESC LIMIT 10 10 rows in set. Elapsed: 0.019 sec. Processed 43.28 million rows, 86.55 MB (2.33 billion rows/s., 4.65 GB/s.) ``` -See the dictionary here: [https://en.wikipedia.org/wiki/Mobile_country_code](https://en.wikipedia.org/wiki/Mobile_country_code). +So, the top countries are: the USA, Germany, and Russia. -So, the top countries are USA, Germany and Russia. - -You may want to create an [External Dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts/) in ClickHouse to decode these values. +You may want to create an [External Dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) in ClickHouse to decode these values. -### Example of using `pointInPolygon` function +## Use Case {#use-case} -Create a table where we will store polygons: +Using the `pointInPolygon` function. + +1. Create a table where we will store polygons: ``` CREATE TEMPORARY TABLE moscow (polygon Array(Tuple(Float64, Float64))); ``` -This is a rough shape of Moscow (without "new Moscow"): +2.
This is a rough shape of Moscow (without "new Moscow"): ``` INSERT INTO moscow VALUES ([(37.84172564285271, 55.78000432402266), (37.8381207618713, 55.775874525970494), (37.83979446823122, 55.775626746008065), (37.84243326983639, 55.77446586811748), (37.84262672750849, 55.771974101091104), (37.84153238623039, 55.77114545193181), (37.841124690460184, 55.76722010265554), (37.84239076983644, 55.76654891107098), (37.842283558197025, 55.76258709833121), (37.8421759312134, 55.758073999993734), (37.84198330422974, 55.75381499999371), (37.8416827275085, 55.749277102484484), (37.84157576190186, 55.74794544108413), (37.83897929098507, 55.74525257875241), (37.83739676451868, 55.74404373042019), (37.838732481460525, 55.74298009816793), (37.841183997352545, 55.743060321833575), (37.84097476190185, 55.73938799999373), (37.84048155819702, 55.73570799999372), (37.840095812164286, 55.73228210777237), (37.83983814285274, 55.73080491981639), (37.83846476321406, 55.729799917464675), (37.83835745269769, 55.72919751082619), (37.838636380279524, 55.72859509486539), (37.8395161005249, 55.727705075632784), (37.83897964285276, 55.722727886185154), (37.83862557539366, 55.72034817326636), (37.83559735744853, 55.71944437307499), (37.835370708803126, 55.71831419154461), (37.83738169402022, 55.71765218986692), (37.83823396494291, 55.71691750159089), (37.838056931213345, 55.71547311301385), (37.836812846557606, 55.71221445615604), (37.83522525396725, 55.709331054395555), (37.83269301586908, 55.70953687463627), (37.829667367706236, 55.70903403789297), (37.83311126588435, 55.70552351822608), (37.83058993121339, 55.70041317726053), (37.82983872750851, 55.69883771404813), (37.82934501586913, 55.69718947487017), (37.828926414016685, 55.69504441658371), (37.82876530422971, 55.69287499999378), (37.82894754100031, 55.690759754047335), (37.827697554878185, 55.68951421135665), (37.82447346292115, 55.68965045405069), (37.83136543914793, 55.68322046195302), (37.833554015869154, 55.67814012759211), (37.83544184655761, 55.67295011628339), (37.837480388885474, 55.6672498719639), (37.838960677246064, 55.66316274139358), (37.83926093121332, 55.66046999999383), (37.839025050262435, 55.65869897264431), (37.83670784390257, 55.65794084879904), (37.835656529083245, 55.65694309303843), (37.83704060449217, 55.65689306460552), (37.83696819873806, 55.65550363526252), (37.83760389616388, 55.65487847246661), (37.83687972750851, 55.65356745541324), (37.83515216004943, 55.65155951234079), (37.83312418518067, 55.64979413590619), (37.82801726983639, 55.64640836412121), (37.820614174591, 55.64164525405531), (37.818908190475426, 55.6421883258084), (37.81717543386075, 55.64112490388471), (37.81690987037274, 55.63916106913107), (37.815099354492155, 55.637925371757085), (37.808769150787356, 55.633798276884455), (37.80100123544311, 55.62873670012244), (37.79598013491824, 55.62554336109055), (37.78634567724606, 55.62033499605651), (37.78334147619623, 55.618768681480326), (37.77746201055901, 55.619855533402706), (37.77527329626457, 55.61909966711279), (37.77801986242668, 55.618770300976294), (37.778212973541216, 55.617257701952106), (37.77784818518065, 55.61574504433011), (37.77016867724609, 55.61148576294007), (37.760191219573976, 55.60599579539028), (37.75338926983641, 55.60227892751446), (37.746329965606634, 55.59920577639331), (37.73939925396728, 55.59631430313617), (37.73273665739439, 55.5935318803559), (37.7299954450912, 55.59350760316188), (37.7268679946899, 55.59469840523759), (37.72626726983634, 55.59229549697373), (37.7262673598022, 
55.59081598950582), (37.71897193121335, 55.5877595845419), (37.70871550793456, 55.58393177431724), (37.700497489410374, 55.580917323756644), (37.69204305026244, 55.57778089778455), (37.68544477378839, 55.57815154690915), (37.68391050793454, 55.57472945079756), (37.678803592590306, 55.57328235936491), (37.6743402539673, 55.57255251445782), (37.66813862698363, 55.57216388774464), (37.617927457672096, 55.57505691895805), (37.60443099999999, 55.5757737568051), (37.599683515869145, 55.57749105910326), (37.59754177842709, 55.57796291823627), (37.59625834786988, 55.57906686095235), (37.59501783265684, 55.57746616444403), (37.593090671936025, 55.57671634534502), (37.587018007904, 55.577944600233785), (37.578692203704804, 55.57982895000019), (37.57327546607398, 55.58116294118248), (37.57385012109279, 55.581550362779), (37.57399562266922, 55.5820107079112), (37.5735356072979, 55.58226289171689), (37.57290393054962, 55.582393529795155), (37.57037722355653, 55.581919415056234), (37.5592298306885, 55.584471614867844), (37.54189249206543, 55.58867650795186), (37.5297256269836, 55.59158133551745), (37.517837865081766, 55.59443656218868), (37.51200186508174, 55.59635625174229), (37.506808949737554, 55.59907823904434), (37.49820432275389, 55.6062944994944), (37.494406071441674, 55.60967103463367), (37.494760001358024, 55.61066689753365), (37.49397137107085, 55.61220931698269), (37.49016528606031, 55.613417718449064), (37.48773249206542, 55.61530616333343), (37.47921386508177, 55.622640129112334), (37.470652153442394, 55.62993723476164), (37.46273446298218, 55.6368075123157), (37.46350692265317, 55.64068225239439), (37.46050283203121, 55.640794546982576), (37.457627470916734, 55.64118904154646), (37.450718034393326, 55.64690488145138), (37.44239252645875, 55.65397824729769), (37.434587576721185, 55.66053543155961), (37.43582144975277, 55.661693766520735), (37.43576786245721, 55.662755031737014), (37.430982915344174, 55.664610641628116), (37.428547447097685, 55.66778515273695), (37.42945134592044, 55.668633314343566), (37.42859571562949, 55.66948145750025), (37.4262836402282, 55.670813882451405), (37.418709037048295, 55.6811141674414), (37.41922139651101, 55.68235377885389), (37.419218771842885, 55.68359335082235), (37.417196501327446, 55.684375235224735), (37.41607020370478, 55.68540557585352), (37.415640857147146, 55.68686637150793), (37.414632153442334, 55.68903015131686), (37.413344899475064, 55.690896881757396), (37.41171432275391, 55.69264232162232), (37.40948282275393, 55.69455101638112), (37.40703674603271, 55.69638690385348), (37.39607169577025, 55.70451821283731), (37.38952706878662, 55.70942491932811), (37.387778313491815, 55.71149057784176), (37.39049275399779, 55.71419814298992), (37.385557272491454, 55.7155489617061), (37.38388335714726, 55.71849856042102), (37.378368238098155, 55.7292763261685), (37.37763597123337, 55.730845879211614), (37.37890062088197, 55.73167906388319), (37.37750451918789, 55.734703664681774), (37.375610832015965, 55.734851959522246), (37.3723813571472, 55.74105626086403), (37.37014935714723, 55.746115620904355), (37.36944173016362, 55.750883999993725), (37.36975304365541, 55.76335905525834), (37.37244070571134, 55.76432079697595), (37.3724259757175, 55.76636979670426), (37.369922155757884, 55.76735417953104), (37.369892695770275, 55.76823419316575), (37.370214730163575, 55.782312184391266), (37.370493611114505, 55.78436801120489), (37.37120164550783, 55.78596427165359), (37.37284851456452, 55.7874378183096), (37.37608325135799, 55.7886695054807), (37.3764587460632, 
55.78947647305964), (37.37530000265506, 55.79146512926804), (37.38235915344241, 55.79899647809345), (37.384344043655396, 55.80113596939471), (37.38594269577028, 55.80322699999366), (37.38711208598329, 55.804919036911976), (37.3880239841309, 55.806610999993666), (37.38928977249147, 55.81001864976979), (37.39038389947512, 55.81348641242801), (37.39235781481933, 55.81983538336746), (37.393709457672124, 55.82417822811877), (37.394685720901464, 55.82792275755836), (37.39557615344238, 55.830447148154136), (37.39844478226658, 55.83167107969975), (37.40019761214057, 55.83151823557964), (37.400398790382326, 55.83264967594742), (37.39659544313046, 55.83322180909622), (37.39667059524539, 55.83402792148566), (37.39682089947515, 55.83638877400216), (37.39643489154053, 55.83861656112751), (37.3955338994751, 55.84072348043264), (37.392680272491454, 55.84502158126453), (37.39241188227847, 55.84659117913199), (37.392529730163616, 55.84816071336481), (37.39486835714723, 55.85288092980303), (37.39873052645878, 55.859893456073635), (37.40272161111449, 55.86441833633205), (37.40697072750854, 55.867579567544375), (37.410007082016016, 55.868369880337), (37.4120992989502, 55.86920843741314), (37.412668021163924, 55.87055369615854), (37.41482461111453, 55.87170587948249), (37.41862266137694, 55.873183961039565), (37.42413732540892, 55.874879126654704), (37.4312182698669, 55.875614937236705), (37.43111093783558, 55.8762723478417), (37.43332105622856, 55.87706546369396), (37.43385747619623, 55.87790681284802), (37.441303050262405, 55.88027084462084), (37.44747234260555, 55.87942070143253), (37.44716141796871, 55.88072960917233), (37.44769797085568, 55.88121221323979), (37.45204320500181, 55.882080694420715), (37.45673176190186, 55.882346110794586), (37.463383999999984, 55.88252729504517), (37.46682797486874, 55.88294937719063), (37.470014457672086, 55.88361266759345), (37.47751410450743, 55.88546991372396), (37.47860317658232, 55.88534929207307), (37.48165826025772, 55.882563306475106), (37.48316434442331, 55.8815803226785), (37.483831555817645, 55.882427612793315), (37.483182967125686, 55.88372791409729), (37.483092277908824, 55.88495581062434), (37.4855716508179, 55.8875561994203), (37.486440636245746, 55.887827444039566), (37.49014203439328, 55.88897899871799), (37.493210285705544, 55.890208937135604), (37.497512451065035, 55.891342397444696), (37.49780744510645, 55.89174030252967), (37.49940333499519, 55.89239745507079), (37.50018383334346, 55.89339220941865), (37.52421672750851, 55.903869074155224), (37.52977457672118, 55.90564076517974), (37.53503220370484, 55.90661661218259), (37.54042858064267, 55.90714113744566), (37.54320461007303, 55.905645048442985), (37.545686966066306, 55.906608607018505), (37.54743976120755, 55.90788552162358), (37.55796999999999, 55.90901557907218), (37.572711542327866, 55.91059395704873), (37.57942799999998, 55.91073854155573), (37.58502865872187, 55.91009969268444), (37.58739968913264, 55.90794809960554), (37.59131567193598, 55.908713267595054), (37.612687423278814, 55.902866854295375), (37.62348079629517, 55.90041967242986), (37.635797880950896, 55.898141151686396), (37.649487626983664, 55.89639275532968), (37.65619302513125, 55.89572360207488), (37.66294133862307, 55.895295577183965), (37.66874564418033, 55.89505457604897), (37.67375601586915, 55.89254677027454), (37.67744661901856, 55.8947775867987), (37.688347, 55.89450045676125), (37.69480554232789, 55.89422926332761), (37.70107096560668, 55.89322256101114), (37.705962965606716, 55.891763491662616), (37.711885134918205, 
55.889110234998974), (37.71682005026245, 55.886577568759876), (37.7199315476074, 55.88458159806678), (37.72234560316464, 55.882281005794134), (37.72364385977171, 55.8809452036196), (37.725371142837474, 55.8809722706006), (37.727870902099546, 55.88037213862385), (37.73394330422971, 55.877941504088696), (37.745339592590376, 55.87208120378722), (37.75525267724611, 55.86703807949492), (37.76919976190188, 55.859821640197474), (37.827835219574, 55.82962968399116), (37.83341438888553, 55.82575289922351), (37.83652584655761, 55.82188784027888), (37.83809213491821, 55.81612575504693), (37.83605359521481, 55.81460347077685), (37.83632178569025, 55.81276696067908), (37.838623105812026, 55.811486181656385), (37.83912198147584, 55.807329380532785), (37.839079078033414, 55.80510270463816), (37.83965844708251, 55.79940712529036), (37.840581150787344, 55.79131399999368), (37.84172564285271, 55.78000432402266)]); ``` -Check how many cell towers are in Moscow: +3. Check how many cell towers are in Moscow: ``` SELECT count() FROM cell_towers WHERE pointInPolygon((lon, lat), (SELECT * FROM moscow)) @@ -128,6 +129,4 @@ SELECT count() FROM cell_towers WHERE pointInPolygon((lon, lat), (SELECT * FROM The data is also available for interactive queries in the [Playground](https://gh-api.clickhouse.tech/play?user=play), [example](https://gh-api.clickhouse.tech/play?user=play#U0VMRUNUIG1jYywgY291bnQoKSBGUk9NIGNlbGxfdG93ZXJzIEdST1VQIEJZIG1jYyBPUkRFUiBCWSBjb3VudCgpIERFU0M=). -Although you cannot create temporary tables there. - -[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/cell-towers/) +Although you cannot create temporary tables there. \ No newline at end of file diff --git a/docs/en/getting-started/example-datasets/ontime.md b/docs/en/getting-started/example-datasets/ontime.md index 83673cdceb6..f18acc6fd50 100644 --- a/docs/en/getting-started/example-datasets/ontime.md +++ b/docs/en/getting-started/example-datasets/ontime.md @@ -21,120 +21,121 @@ echo https://transtats.bts.gov/PREZIP/On_Time_Reporting_Carrier_On_Time_Performa Creating a table: ``` sql -CREATE TABLE `ontime` ( - `Year` UInt16, - `Quarter` UInt8, - `Month` UInt8, - `DayofMonth` UInt8, - `DayOfWeek` UInt8, - `FlightDate` Date, - `UniqueCarrier` FixedString(7), - `AirlineID` Int32, - `Carrier` FixedString(2), - `TailNum` String, - `FlightNum` String, - `OriginAirportID` Int32, - `OriginAirportSeqID` Int32, - `OriginCityMarketID` Int32, - `Origin` FixedString(5), - `OriginCityName` String, - `OriginState` FixedString(2), - `OriginStateFips` String, - `OriginStateName` String, - `OriginWac` Int32, - `DestAirportID` Int32, - `DestAirportSeqID` Int32, - `DestCityMarketID` Int32, - `Dest` FixedString(5), - `DestCityName` String, - `DestState` FixedString(2), - `DestStateFips` String, - `DestStateName` String, - `DestWac` Int32, - `CRSDepTime` Int32, - `DepTime` Int32, - `DepDelay` Int32, - `DepDelayMinutes` Int32, - `DepDel15` Int32, - `DepartureDelayGroups` String, - `DepTimeBlk` String, - `TaxiOut` Int32, - `WheelsOff` Int32, - `WheelsOn` Int32, - `TaxiIn` Int32, - `CRSArrTime` Int32, - `ArrTime` Int32, - `ArrDelay` Int32, - `ArrDelayMinutes` Int32, - `ArrDel15` Int32, - `ArrivalDelayGroups` Int32, - `ArrTimeBlk` String, - `Cancelled` UInt8, - `CancellationCode` FixedString(1), - `Diverted` UInt8, - `CRSElapsedTime` Int32, - `ActualElapsedTime` Int32, - `AirTime` Int32, - `Flights` Int32, - `Distance` Int32, - `DistanceGroup` UInt8, - `CarrierDelay` Int32, - `WeatherDelay` Int32, - `NASDelay` Int32, - `SecurityDelay` 
Int32, - `LateAircraftDelay` Int32, - `FirstDepTime` String, - `TotalAddGTime` String, - `LongestAddGTime` String, - `DivAirportLandings` String, - `DivReachedDest` String, - `DivActualElapsedTime` String, - `DivArrDelay` String, - `DivDistance` String, - `Div1Airport` String, - `Div1AirportID` Int32, - `Div1AirportSeqID` Int32, - `Div1WheelsOn` String, - `Div1TotalGTime` String, - `Div1LongestGTime` String, - `Div1WheelsOff` String, - `Div1TailNum` String, - `Div2Airport` String, - `Div2AirportID` Int32, - `Div2AirportSeqID` Int32, - `Div2WheelsOn` String, - `Div2TotalGTime` String, - `Div2LongestGTime` String, - `Div2WheelsOff` String, - `Div2TailNum` String, - `Div3Airport` String, - `Div3AirportID` Int32, - `Div3AirportSeqID` Int32, - `Div3WheelsOn` String, - `Div3TotalGTime` String, - `Div3LongestGTime` String, - `Div3WheelsOff` String, - `Div3TailNum` String, - `Div4Airport` String, - `Div4AirportID` Int32, - `Div4AirportSeqID` Int32, - `Div4WheelsOn` String, - `Div4TotalGTime` String, - `Div4LongestGTime` String, - `Div4WheelsOff` String, - `Div4TailNum` String, - `Div5Airport` String, - `Div5AirportID` Int32, - `Div5AirportSeqID` Int32, - `Div5WheelsOn` String, - `Div5TotalGTime` String, - `Div5LongestGTime` String, - `Div5WheelsOff` String, - `Div5TailNum` String +CREATE TABLE `ontime` +( + `Year` UInt16, + `Quarter` UInt8, + `Month` UInt8, + `DayofMonth` UInt8, + `DayOfWeek` UInt8, + `FlightDate` Date, + `Reporting_Airline` String, + `DOT_ID_Reporting_Airline` Int32, + `IATA_CODE_Reporting_Airline` String, + `Tail_Number` Int32, + `Flight_Number_Reporting_Airline` String, + `OriginAirportID` Int32, + `OriginAirportSeqID` Int32, + `OriginCityMarketID` Int32, + `Origin` FixedString(5), + `OriginCityName` String, + `OriginState` FixedString(2), + `OriginStateFips` String, + `OriginStateName` String, + `OriginWac` Int32, + `DestAirportID` Int32, + `DestAirportSeqID` Int32, + `DestCityMarketID` Int32, + `Dest` FixedString(5), + `DestCityName` String, + `DestState` FixedString(2), + `DestStateFips` String, + `DestStateName` String, + `DestWac` Int32, + `CRSDepTime` Int32, + `DepTime` Int32, + `DepDelay` Int32, + `DepDelayMinutes` Int32, + `DepDel15` Int32, + `DepartureDelayGroups` String, + `DepTimeBlk` String, + `TaxiOut` Int32, + `WheelsOff` Int32, + `WheelsOn` Int32, + `TaxiIn` Int32, + `CRSArrTime` Int32, + `ArrTime` Int32, + `ArrDelay` Int32, + `ArrDelayMinutes` Int32, + `ArrDel15` Int32, + `ArrivalDelayGroups` Int32, + `ArrTimeBlk` String, + `Cancelled` UInt8, + `CancellationCode` FixedString(1), + `Diverted` UInt8, + `CRSElapsedTime` Int32, + `ActualElapsedTime` Int32, + `AirTime` Nullable(Int32), + `Flights` Int32, + `Distance` Int32, + `DistanceGroup` UInt8, + `CarrierDelay` Int32, + `WeatherDelay` Int32, + `NASDelay` Int32, + `SecurityDelay` Int32, + `LateAircraftDelay` Int32, + `FirstDepTime` String, + `TotalAddGTime` String, + `LongestAddGTime` String, + `DivAirportLandings` String, + `DivReachedDest` String, + `DivActualElapsedTime` String, + `DivArrDelay` String, + `DivDistance` String, + `Div1Airport` String, + `Div1AirportID` Int32, + `Div1AirportSeqID` Int32, + `Div1WheelsOn` String, + `Div1TotalGTime` String, + `Div1LongestGTime` String, + `Div1WheelsOff` String, + `Div1TailNum` String, + `Div2Airport` String, + `Div2AirportID` Int32, + `Div2AirportSeqID` Int32, + `Div2WheelsOn` String, + `Div2TotalGTime` String, + `Div2LongestGTime` String, + `Div2WheelsOff` String, + `Div2TailNum` String, + `Div3Airport` String, + `Div3AirportID` Int32, + `Div3AirportSeqID` Int32, + 
`Div3WheelsOn` String, + `Div3TotalGTime` String, + `Div3LongestGTime` String, + `Div3WheelsOff` String, + `Div3TailNum` String, + `Div4Airport` String, + `Div4AirportID` Int32, + `Div4AirportSeqID` Int32, + `Div4WheelsOn` String, + `Div4TotalGTime` String, + `Div4LongestGTime` String, + `Div4WheelsOff` String, + `Div4TailNum` String, + `Div5Airport` String, + `Div5AirportID` Int32, + `Div5AirportSeqID` Int32, + `Div5WheelsOn` String, + `Div5TotalGTime` String, + `Div5LongestGTime` String, + `Div5WheelsOff` String, + `Div5TailNum` String ) ENGINE = MergeTree -PARTITION BY Year -ORDER BY (Carrier, FlightDate) -SETTINGS index_granularity = 8192; + PARTITION BY Year + ORDER BY (IATA_CODE_Reporting_Airline, FlightDate) + SETTINGS index_granularity = 8192; ``` Loading data with multiple threads: @@ -206,7 +207,7 @@ LIMIT 10; Q4. The number of delays by carrier for 2007 ``` sql -SELECT Carrier, count(*) +SELECT IATA_CODE_Reporting_Airline AS Carrier, count(*) FROM ontime WHERE DepDelay>10 AND Year=2007 GROUP BY Carrier @@ -220,29 +221,29 @@ SELECT Carrier, c, c2, c*100/c2 as c3 FROM ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year=2007 GROUP BY Carrier -) +) q JOIN ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c2 FROM ontime WHERE Year=2007 GROUP BY Carrier -) USING Carrier +) qq USING Carrier ORDER BY c3 DESC; ``` Better version of the same query: ``` sql -SELECT Carrier, avg(DepDelay>10)*100 AS c3 +SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3 FROM ontime WHERE Year=2007 GROUP BY Carrier @@ -256,29 +257,29 @@ SELECT Carrier, c, c2, c*100/c2 as c3 FROM ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year>=2000 AND Year<=2008 GROUP BY Carrier -) +) q JOIN ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c2 FROM ontime WHERE Year>=2000 AND Year<=2008 GROUP BY Carrier -) USING Carrier +) qq USING Carrier ORDER BY c3 DESC; ``` Better version of the same query: ``` sql -SELECT Carrier, avg(DepDelay>10)*100 AS c3 +SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3 FROM ontime WHERE Year>=2000 AND Year<=2008 GROUP BY Carrier @@ -297,7 +298,7 @@ FROM from ontime WHERE DepDelay>10 GROUP BY Year -) +) q JOIN ( select @@ -305,7 +306,7 @@ JOIN count(*) as c2 from ontime GROUP BY Year -) USING (Year) +) qq USING (Year) ORDER BY Year; ``` @@ -340,7 +341,7 @@ Q10. ``` sql SELECT - min(Year), max(Year), Carrier, count(*) AS cnt, + min(Year), max(Year), IATA_CODE_Reporting_Airline AS Carrier, count(*) AS cnt, sum(ArrDelayMinutes>30) AS flights_delayed, round(sum(ArrDelayMinutes>30)/count(*),2) AS rate FROM ontime diff --git a/docs/en/getting-started/install.md b/docs/en/getting-started/install.md index a8753de6abd..9a4848a3ef0 100644 --- a/docs/en/getting-started/install.md +++ b/docs/en/getting-started/install.md @@ -102,7 +102,9 @@ For non-Linux operating systems and for AArch64 CPU arhitecture, ClickHouse buil - [FreeBSD](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse` - [AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse` -After downloading, you can use the `clickhouse client` to connect to the server, or `clickhouse local` to process local data. 
To run `clickhouse server`, you have to additionally download [server](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config.xml) and [users](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/users.xml) configuration files from GitHub.
+After downloading, you can use the `clickhouse client` to connect to the server, or `clickhouse local` to process local data.
+
+Run `sudo ./clickhouse install` if you want to install ClickHouse system-wide (it also installs the needed configuration files, configures users, etc.). After that, run the `clickhouse start` command to start the clickhouse-server and `clickhouse-client` to connect to it.

These builds are not recommended for use in production environments because they are less thoroughly tested, but you can do so at your own risk. They also have only a subset of ClickHouse features available.

@@ -130,7 +132,7 @@ To start the server as a daemon, run:
$ sudo service clickhouse-server start
```

-If you don’t have `service` command, run as
+If you do not have the `service` command, run as

``` bash
$ sudo /etc/init.d/clickhouse-server start
@@ -138,7 +140,7 @@ $ sudo /etc/init.d/clickhouse-server start

See the logs in the `/var/log/clickhouse-server/` directory.

-If the server doesn’t start, check the configurations in the file `/etc/clickhouse-server/config.xml`.
+If the server does not start, check the configurations in the file `/etc/clickhouse-server/config.xml`.

You can also manually launch the server from the console:

@@ -147,7 +149,7 @@ $ clickhouse-server --config-file=/etc/clickhouse-server/config.xml
```

In this case, the log will be printed to the console, which is convenient during development.
-If the configuration file is in the current directory, you don’t need to specify the `--config-file` parameter. By default, it uses `./config.xml`.
+If the configuration file is in the current directory, you do not need to specify the `--config-file` parameter. By default, it uses `./config.xml`.

ClickHouse supports access restriction settings. They are located in the `users.xml` file (next to `config.xml`). By default, access is allowed from anywhere for the `default` user, without a password. See `user/default/networks`.
diff --git a/docs/en/getting-started/tutorial.md b/docs/en/getting-started/tutorial.md
index fe697972dff..694a82e100e 100644
--- a/docs/en/getting-started/tutorial.md
+++ b/docs/en/getting-started/tutorial.md
@@ -105,7 +105,7 @@ Syntax for creating tables is way more complicated compared to databases (see [r
2. Table schema, i.e. list of columns and their [data types](../sql-reference/data-types/index.md).
3. [Table engine](../engines/table-engines/index.md) and its settings, which determines all the details on how queries to this table will be physically executed.

-Yandex.Metrica is a web analytics service, and sample dataset doesn’t cover its full functionality, so there are only two tables to create:
+Yandex.Metrica is a web analytics service, and the sample dataset does not cover its full functionality, so there are only two tables to create:

- `hits` is a table with each action done by all users on all websites covered by the service.
- `visits` is a table that contains pre-built sessions instead of individual actions.
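A possible addition to the tutorial hunk above (an editorial sketch, not part of the original patch): a quick sanity check once both tables are created and loaded. The `tutorial.hits_v1` and `tutorial.visits_v1` names are the ones the tutorial uses for these tables; adjust if yours differ.

``` sql
-- Confirm that both tutorial tables exist and received data:
SELECT 'hits' AS source, count() AS rows FROM tutorial.hits_v1
UNION ALL
SELECT 'visits' AS source, count() AS rows FROM tutorial.visits_v1;
```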
diff --git a/docs/en/guides/apply-catboost-model.md b/docs/en/guides/apply-catboost-model.md
index f614b121714..ec3ecc92141 100644
--- a/docs/en/guides/apply-catboost-model.md
+++ b/docs/en/guides/apply-catboost-model.md
@@ -20,7 +20,7 @@ For more information about training CatBoost models, see [Training and applying

## Prerequisites {#prerequisites}

-If you don’t have the [Docker](https://docs.docker.com/install/) yet, install it.
+If you do not have [Docker](https://docs.docker.com/install/) yet, install it.

!!! note "Note"
    [Docker](https://www.docker.com) is a software platform that allows you to create containers that isolate a CatBoost and ClickHouse installation from the rest of the system.

@@ -159,6 +159,9 @@ The fastest way to evaluate a CatBoost model is compile `libcatboostmodel.
/home/catboost/models/*_model.xml
```

+!!! note "Note"
+    You can change the path to the CatBoost model configuration later without restarting the server.
+
## 4. Run the Model Inference from SQL {#run-model-inference}

For test model run the ClickHouse client `$ clickhouse client`.
diff --git a/docs/en/index.md b/docs/en/index.md
index 676fd444995..12e72ebdf3b 100644
--- a/docs/en/index.md
+++ b/docs/en/index.md
@@ -54,7 +54,7 @@ The higher the load on the system, the more important it is to customize the sys
- There is one large table per query. All tables are small, except for one.
- A query result is significantly smaller than the source data. In other words, data is filtered or aggregated, so the result fits in a single server’s RAM.

-It is easy to see that the OLAP scenario is very different from other popular scenarios (such as OLTP or Key-Value access). So it doesn’t make sense to try to use OLTP or a Key-Value DB for processing analytical queries if you want to get decent performance. For example, if you try to use MongoDB or Redis for analytics, you will get very poor performance compared to OLAP databases.
+It is easy to see that the OLAP scenario is very different from other popular scenarios (such as OLTP or Key-Value access). So it does not make sense to try to use OLTP or a Key-Value DB for processing analytical queries if you want to get decent performance. For example, if you try to use MongoDB or Redis for analytics, you will get very poor performance compared to OLAP databases.

## Why Column-Oriented Databases Work Better in the OLAP Scenario {#why-column-oriented-databases-work-better-in-the-olap-scenario}

@@ -80,15 +80,15 @@ For example, the query “count the number of records for each advertising platf

### CPU {#cpu}

-Since executing a query requires processing a large number of rows, it helps to dispatch all operations for entire vectors instead of for separate rows, or to implement the query engine so that there is almost no dispatching cost. If you don’t do this, with any half-decent disk subsystem, the query interpreter inevitably stalls the CPU. It makes sense to both store data in columns and process it, when possible, by columns.
+Since executing a query requires processing a large number of rows, it helps to dispatch all operations for entire vectors instead of for separate rows, or to implement the query engine so that there is almost no dispatching cost. If you do not do this, with any half-decent disk subsystem, the query interpreter inevitably stalls the CPU. It makes sense to both store data in columns and process it, when possible, by columns.

There are two ways to do this:

-1. A vector engine. All operations are written for vectors, instead of for separate values.
This means you don’t need to call operations very often, and dispatching costs are negligible. Operation code contains an optimized internal cycle.
+1. A vector engine. All operations are written for vectors, instead of for separate values. This means you do not need to call operations very often, and dispatching costs are negligible. Operation code contains an optimized internal loop.

2. Code generation. The code generated for the query has all the indirect calls in it.

-This is not done in “normal” databases, because it doesn’t make sense when running simple queries. However, there are exceptions. For example, MemSQL uses code generation to reduce latency when processing SQL queries. (For comparison, analytical DBMSs require optimization of throughput, not latency.)
+This is not done in “normal” databases, because it does not make sense when running simple queries. However, there are exceptions. For example, MemSQL uses code generation to reduce latency when processing SQL queries. (For comparison, analytical DBMSs require optimization of throughput, not latency.)

Note that for CPU efficiency, the query language must be declarative (SQL or MDX), or at least a vector (J, K). The query should only contain implicit loops, allowing for optimization.
diff --git a/docs/en/interfaces/cli.md b/docs/en/interfaces/cli.md
index 7e072e366dc..8457ea41857 100644
--- a/docs/en/interfaces/cli.md
+++ b/docs/en/interfaces/cli.md
@@ -66,7 +66,7 @@ When processing a query, the client shows:
3. The result in the specified format.
4. The number of lines in the result, the time passed, and the average speed of query processing.

-You can cancel a long query by pressing Ctrl+C. However, you will still need to wait for a little for the server to abort the request. It is not possible to cancel a query at certain stages. If you don’t wait and press Ctrl+C a second time, the client will exit.
+You can cancel a long query by pressing Ctrl+C. However, you will still need to wait a little for the server to abort the request. It is not possible to cancel a query at certain stages. If you do not wait and press Ctrl+C a second time, the client will exit.

The command-line client allows passing external data (external temporary tables) for querying. For more information, see the section “External data for query processing”.
diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md
index 5987ba0f676..c616d843173 100644
--- a/docs/en/interfaces/formats.md
+++ b/docs/en/interfaces/formats.md
@@ -662,7 +662,7 @@ ClickHouse allows:
- Any order of key-value pairs in the object.
- Omitting some values.

-ClickHouse ignores spaces between elements and commas after the objects. You can pass all the objects in one line. You don’t have to separate them with line breaks.
+ClickHouse ignores spaces between elements and commas after the objects. You can pass all the objects in one line. You do not have to separate them with line breaks.

**Omitted values processing**

@@ -770,9 +770,9 @@ SELECT * FROM json_each_row_nested

## Native {#native}

-The most efficient format.
Data is written and read by blocks in binary format. For each block, the number of rows, number of columns, column names and types, and parts of columns in this block are recorded one after another. In other words, this format is “columnar” – it does not convert columns to rows. This is the format used in the native interface for interaction between servers, for using the command-line client, and for C++ clients. -You can use this format to quickly generate dumps that can only be read by the ClickHouse DBMS. It doesn’t make sense to work with this format yourself. +You can use this format to quickly generate dumps that can only be read by the ClickHouse DBMS. It does not make sense to work with this format yourself. ## Null {#null} @@ -1039,7 +1039,7 @@ struct Message { } ``` -Deserialization is effective and usually doesn’t increase the system load. +Deserialization is effective and usually does not increase the system load. See also [Format Schema](#formatschema). @@ -1312,7 +1312,7 @@ ClickHouse supports configurable precision of the `Decimal` type. The `INSERT` q Unsupported ORC data types: `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`. -The data types of ClickHouse table columns don’t have to match the corresponding ORC data fields. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) the data to the data type set for the ClickHouse table column. +The data types of ClickHouse table columns do not have to match the corresponding ORC data fields. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) the data to the data type set for the ClickHouse table column. ### Inserting Data {#inserting-data-2} diff --git a/docs/en/interfaces/http.md b/docs/en/interfaces/http.md index 18533cfc6c2..dec3c839020 100644 --- a/docs/en/interfaces/http.md +++ b/docs/en/interfaces/http.md @@ -52,7 +52,7 @@ X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","writ ``` As you can see, curl is somewhat inconvenient in that spaces must be URL escaped. -Although wget escapes everything itself, we don’t recommend using it because it doesn’t work well over HTTP 1.1 when using keep-alive and Transfer-Encoding: chunked. +Although wget escapes everything itself, we do not recommend using it because it does not work well over HTTP 1.1 when using keep-alive and Transfer-Encoding: chunked. ``` bash $ echo 'SELECT 1' | curl 'http://localhost:8123/' --data-binary @- @@ -146,7 +146,7 @@ Deleting the table. $ echo 'DROP TABLE t' | curl 'http://localhost:8123/' --data-binary @- ``` -For successful requests that don’t return a data table, an empty response body is returned. +For successful requests that do not return a data table, an empty response body is returned. ## Compression {#compression} @@ -273,7 +273,7 @@ Possible header fields: - `written_rows` — Number of rows written. - `written_bytes` — Volume of data written in bytes. -Running requests don’t stop automatically if the HTTP connection is lost. Parsing and data formatting are performed on the server-side, and using the network might be ineffective. +Running requests do not stop automatically if the HTTP connection is lost. Parsing and data formatting are performed on the server-side, and using the network might be ineffective. 
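The paragraph above could be illustrated with a short example (an editorial sketch, not part of the original patch): a query orphaned by a dropped HTTP connection can be located and terminated through the system tables. The `query_id` value below is a hypothetical placeholder.

``` sql
-- List queries that are still executing, e.g. after the HTTP client disconnected:
SELECT query_id, elapsed, query
FROM system.processes
ORDER BY elapsed DESC;

-- Terminate a specific query by its identifier (placeholder value shown):
KILL QUERY WHERE query_id = 'some-query-id';
```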
The optional ‘query_id’ parameter can be passed as the query ID (any string). For more information, see the section “Settings, replace_running_query”.

The optional ‘quota_key’ parameter can be passed as the quota key (any string). For more information, see the section “Quotas”.
diff --git a/docs/en/interfaces/third-party/client-libraries.md b/docs/en/interfaces/third-party/client-libraries.md
index c08eec61b1c..f5c85289171 100644
--- a/docs/en/interfaces/third-party/client-libraries.md
+++ b/docs/en/interfaces/third-party/client-libraries.md
@@ -23,6 +23,7 @@ toc_title: Client Libraries
    - [SeasClick C++ client](https://github.com/SeasX/SeasClick)
    - [one-ck](https://github.com/lizhichao/one-ck)
    - [glushkovds/phpclickhouse-laravel](https://packagist.org/packages/glushkovds/phpclickhouse-laravel)
+    - [kolya7k ClickHouse PHP extension](https://github.com/kolya7k/clickhouse-php)
- Go
    - [clickhouse](https://github.com/kshvakov/clickhouse/)
    - [go-clickhouse](https://github.com/roistat/go-clickhouse)
diff --git a/docs/en/interfaces/third-party/gui.md b/docs/en/interfaces/third-party/gui.md
index 5d14b3aa3cc..fffe0c87a53 100644
--- a/docs/en/interfaces/third-party/gui.md
+++ b/docs/en/interfaces/third-party/gui.md
@@ -169,19 +169,23 @@ Features:

### SeekTable {#seektable}

-[SeekTable](https://www.seektable.com) is a self-service BI tool for data exploration and operational reporting. SeekTable is available both as a cloud service and a self-hosted version. SeekTable reports may be embedded into any web-app.
+[SeekTable](https://www.seektable.com) is a self-service BI tool for data exploration and operational reporting. It is available both as a cloud service and a self-hosted version. Reports from SeekTable may be embedded into any web-app.

Features:

- Business users-friendly reports builder.
- Powerful report parameters for SQL filtering and report-specific query customizations.
- Can connect to ClickHouse both with a native TCP/IP endpoint and a HTTP(S) interface (2 different drivers).
-- It is possible to use all power of CH SQL dialect in dimensions/measures definitions
+- It is possible to use all power of ClickHouse SQL dialect in dimensions/measures definitions.
- [Web API](https://www.seektable.com/help/web-api-integration) for automated reports generation.
-- Supports reports development flow with account data [backup/restore](https://www.seektable.com/help/self-hosted-backup-restore), data models (cubes) / reports configuration is a human-readable XML and can be stored under version control.
+- Supports reports development flow with account data [backup/restore](https://www.seektable.com/help/self-hosted-backup-restore); data models (cubes) / reports configuration is human-readable XML and can be stored under a version control system.

SeekTable is [free](https://www.seektable.com/help/cloud-pricing) for personal/individual usage.

[How to configure ClickHouse connection in SeekTable.](https://www.seektable.com/help/clickhouse-pivot-table)

+### Chadmin {#chadmin}
+
+[Chadmin](https://github.com/bun4uk/chadmin) is a simple UI where you can visualize the queries currently running on your ClickHouse cluster, view information about them, and kill them if needed.
+ [Original article](https://clickhouse.tech/docs/en/interfaces/third-party/gui/) diff --git a/docs/en/introduction/adopters.md b/docs/en/introduction/adopters.md index 23f7b596851..fa257a84173 100644 --- a/docs/en/introduction/adopters.md +++ b/docs/en/introduction/adopters.md @@ -12,9 +12,13 @@ toc_title: Adopters |---------|----------|---------|--------------|------------------------------------------------------------------------------|-----------| | 2gis | Maps | Monitoring | — | — | [Talk in Russian, July 2019](https://youtu.be/58sPkXfq6nw) | | Admiral | Martech | Engagement Management | — | — | [Webinar Slides, June 2020](https://altinity.com/presentations/2020/06/16/big-data-in-real-time-how-clickhouse-powers-admirals-visitor-relationships-for-publishers) | +| AdScribe | Ads | TV Analytics | — | — | [A quote from CTO](https://altinity.com/24x7-support/) | +| Ahrefs | SEO | Analytics | — | — | [Job listing](https://ahrefs.com/jobs/data-scientist-search) | | Alibaba Cloud | Cloud | Managed Service | — | — | [Official Website](https://help.aliyun.com/product/144466.html) | | Aloha Browser | Mobile App | Browser backend | — | — | [Slides in Russian, May 2019](https://presentations.clickhouse.tech/meetup22/aloha.pdf) | +| Altinity | Cloud, SaaS | Main product | — | — | [Official Website](https://altinity.com/) | | Amadeus | Travel | Analytics | — | — | [Press Release, April 2018](https://www.altinity.com/blog/2018/4/5/amadeus-technologies-launches-investment-and-insights-tool-based-on-machine-learning-and-strategy-algorithms) | +| ApiRoad | API marketplace | Analytics | — | — | [Blog post, Nov 2018, Mar 2020](https://pixeljets.com/blog/clickhouse-vs-elasticsearch/) | | Appsflyer | Mobile analytics | Main product | — | — | [Talk in Russian, July 2019](https://www.youtube.com/watch?v=M3wbRlcpBbY) | | ArenaData | Data Platform | Main product | — | — | [Slides in Russian, December 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/indexes.pdf) | | Avito | Classifieds | Monitoring | — | — | [Meetup, April 2020](https://www.youtube.com/watch?v=n1tm4j4W8ZQ) | @@ -37,23 +41,27 @@ toc_title: Adopters | CraiditX 氪信 | Finance AI | Analysis | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) | | Crazypanda | Games | | — | — | Live session on ClickHouse meetup | | Criteo | Retail | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/3_storetail.pptx) | +| Cryptology | Digital Assets Trading Platform | — | — | — | [Job advertisement, March 2021](https://career.habr.com/companies/cryptology/vacancies) | | Dataliance for China Telecom | Telecom | Analytics | — | — | [Slides in Chinese, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) | | Deutsche Bank | Finance | BI Analytics | — | — | [Slides in English, October 2019](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) | | Deeplay | Gaming Analytics | — | — | — | [Job advertisement, 2020](https://career.habr.com/vacancies/1000062568) | | Diva-e | Digital consulting | Main Product | — | — | [Slides in English, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) | | Ecwid | E-commerce SaaS | Metrics, Logging | — | — | [Slides in Russian, April 
2019](https://nastachku.ru/var/files/1/presentation/backend/2_Backend_6.pdf) | | eBay | E-commerce | Logs, Metrics and Events | — | — | [Official website, Sep 2020](https://tech.ebayinc.com/engineering/ou-online-analytical-processing/) | -| Exness | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) | +| Exness | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) | +| EventBunker.io | Serverless Data Processing | — | — | — | [Tweet, April 2021](https://twitter.com/Halil_D_/status/1379839133472985091) | | FastNetMon | DDoS Protection | Main Product | | — | [Official website](https://fastnetmon.com/docs-fnm-advanced/fastnetmon-advanced-traffic-persistency/) | | Flipkart | e-Commerce | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=239) | | FunCorp | Games | | — | 14 bn records/day as of Jan 2021 | [Article](https://www.altinity.com/blog/migrating-from-redshift-to-clickhouse) | | Geniee | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) | | Genotek | Bioinformatics | Main product | — | — | [Video, August 2020](https://youtu.be/v3KyZbz9lEE) | +| Glaber | Monitoring | Main product | — | — | [Website](https://glaber.io/) | | HUYA | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) | | ICA | FinTech | Risk Management | — | — | [Blog Post in English, Sep 2020](https://altinity.com/blog/clickhouse-vs-redshift-performance-for-fintech-risk-management?utm_campaign=ClickHouse%20vs%20RedShift&utm_content=143520807&utm_medium=social&utm_source=twitter&hss_channel=tw-3894792263) | | Idealista | Real Estate | Analytics | — | — | [Blog Post in English, April 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | | Infovista | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) | | InnoGames | Games | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) | +| Instabug | APM Platform | Main product | — | — | [A quote from Co-Founder](https://altinity.com/) | | Instana | APM Platform | Main product | — | — | [Twitter post](https://twitter.com/mieldonkers/status/1248884119158882304) | | Integros | Platform for video services | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | | Ippon Technologies | Technology Consulting | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=205) | @@ -65,15 +73,20 @@ toc_title: Adopters | Lawrence Berkeley National Laboratory | Research | Traffic analysis | 1 server | 11.8 TiB | [Slides in English, April 2019](https://www.smitasin.com/presentations/2019-04-17_DOE-NSM.pdf) | | LifeStreet | Ad network | Main product | 75 servers (3 replicas) | 5.27 PiB | [Blog post in Russian, February 2017](https://habr.com/en/post/322620/) | | Mail.ru Cloud Solutions | Cloud services | Main product | — | — | [Article in Russian](https://mcs.mail.ru/help/db-create/clickhouse#) | +| MAXILECT | Ad Tech, Blockchain, ML, AI | — | — | — | [Job advertisement, 
2021](https://www.linkedin.com/feed/update/urn:li:activity:6780842017229430784/) | | Marilyn | Advertising | Statistics | — | — | [Talk in Russian, June 2017](https://www.youtube.com/watch?v=iXlIgx2khwc) | | Mello | Marketing | Analytics | 1 server | — | [Article, Oct 2020](https://vc.ru/marketing/166180-razrabotka-tipovogo-otcheta-skvoznoy-analitiki) | | MessageBird | Telecommunications | Statistics | — | — | [Slides in English, November 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) | -| MindsDB | Machine Learning | Main Product | — | — | [Official Website](https://www.mindsdb.com/blog/machine-learning-models-as-tables-in-ch) |x +| Microsoft | Web Analytics | Clarity (Main Product) | — | — | [A question on GitHub](https://github.com/ClickHouse/ClickHouse/issues/21556) | +| MindsDB | Machine Learning | Main Product | — | — | [Official Website](https://www.mindsdb.com/blog/machine-learning-models-as-tables-in-ch) | | MUX | Online Video | Video Analytics | — | — | [Talk in English, August 2019](https://altinity.com/presentations/2019/8/13/how-clickhouse-became-the-default-analytics-database-for-mux/) | | MGID | Ad network | Web-analytics | — | — | [Blog post in Russian, April 2020](http://gs-studio.com/news-about-it/32777----clickhouse---c) | +| Netskope | Network Security | — | — | — | [Job advertisement, March 2021](https://www.mendeley.com/careers/job/senior-software-developer-backend-developer-1346348) | +| NIC Labs | Network Monitoring | RaTA-DNS | — | — | [Blog post, March 2021](https://niclabs.cl/ratadns/2021/03/Clickhouse) | | NOC Project | Network Monitoring | Analytics | Main Product | — | [Official Website](https://getnoc.com/features/big-data/) | | Nuna Inc. | Health Data Analytics | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=170) | | OneAPM | Monitorings and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) | +| OZON | E-commerce | — | — | — | [Official website](https://job.ozon.ru/vacancy/razrabotchik-clickhouse-ekspluatatsiya-40991870/) | | Panelbear | Analytics | Monitoring and Analytics | — | — | [Tech Stack, November 2020](https://panelbear.com/blog/tech-stack/) | | Percent 百分点 | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) | | Percona | Performance analysis | Percona Monitoring and Management | — | — | [Official website, Mar 2020](https://www.percona.com/blog/2020/03/30/advanced-query-analysis-in-percona-monitoring-and-management-with-direct-clickhouse-access/) | @@ -90,14 +103,17 @@ toc_title: Adopters | Rspamd | Antispam | Analytics | — | — | [Official Website](https://rspamd.com/doc/modules/clickhouse.html) | | RuSIEM | SIEM | Main Product | — | — | [Official Website](https://rusiem.com/en/products/architecture) | | S7 Airlines | Airlines | Metrics, Logging | — | — | [Talk in Russian, March 2019](https://www.youtube.com/watch?v=nwG68klRpPg&t=15s) | +| Sber | Banking, Fintech, Retail, Cloud, Media | — | — | — | [Job advertisement, March 2021](https://career.habr.com/vacancies/1000073536) | | scireum GmbH | e-Commerce | Main product | — | — | [Talk in German, February 2020](https://www.youtube.com/watch?v=7QWAn5RbyR4) | | Segment | Data processing | Main product | 9 * i3en.3xlarge nodes 7.5TB NVME SSDs, 96GB Memory, 12 vCPUs | 
— | [Slides, 2019](https://slides.com/abraithwaite/segment-clickhouse) | +| sembot.io | Shopping Ads | — | — | — | A comment on LinkedIn, 2020 | | SEMrush | Marketing | Main product | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) | | Sentry | Software Development | Main product | — | — | [Blog Post in English, May 2019](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) | | seo.do | Analytics | Main product | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) | | SGK | Goverment Social Security | Analytics | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) | | Sina | News | — | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) | | SMI2 | News | Analytics | — | — | [Blog Post in Russian, November 2017](https://habr.com/ru/company/smi2/blog/314558/) | +| Spark New Zealand | Telecommunications | Security Operations | — | — | [Blog Post, Feb 2020](https://blog.n0p.me/2020/02/2020-02-05-dnsmonster/) | | Splunk | Business Analytics | Main product | — | — | [Slides in English, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) | | Spotify | Music | Experimentation | — | — | [Slides, July 2018](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) | | Staffcop | Information Security | Main Product | — | — | [Official website, Documentation](https://www.staffcop.ru/sce43) | @@ -106,23 +122,31 @@ toc_title: Adopters | Tencent | Big Data | Data processing | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) | | Tencent | Messaging | Logging | — | — | [Talk in Chinese, November 2019](https://youtu.be/T-iVQRuw-QY?t=5050) | | Tencent Music Entertainment (TME) | BigData | Data processing | — | — | [Blog in Chinese, June 2020](https://cloud.tencent.com/developer/article/1637840) | +| Tinybird | Real-time Data Products | Data processing | — | — | [Official website](https://www.tinybird.co/) | | Traffic Stars | AD network | — | — | — | [Slides in Russian, May 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) | | Uber | Taxi | Logging | — | — | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/uber.pdf) | +| UTMSTAT | Analytics | Main product | — | — | [Blog post, June 2020](https://vc.ru/tribuna/133956-striming-dannyh-iz-servisa-skvoznoy-analitiki-v-clickhouse) | | VKontakte | Social Network | Statistics, Logging | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/3_vk.pdf) | +| VMWare | Cloud | VeloCloud, SDN | — | — | [Product documentation](https://docs.vmware.com/en/vRealize-Operations-Manager/8.3/com.vmware.vcom.metrics.doc/GUID-A9AD72E1-C948-4CA2-971B-919385AB3CA8.html) | | Walmart Labs | Internet, Retail | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=144) | | Wargaming | Games | | — | — | [Interview](https://habr.com/en/post/496954/) | +| Wildberries | E-commerce | | — | — | [Official 
website](https://it.wildberries.ru/) | | Wisebits | IT Solutions | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | | Workato | Automation Software | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=334) | +| Xenoss | Marketing, Advertising | — | — | — | [Instagram, March 2021](https://www.instagram.com/p/CNATV7qBgB1/) | | Xiaoxin Tech | Education | Common purpose | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/sync-clickhouse-with-mysql-mongodb.pptx) | | Ximalaya | Audio sharing | OLAP | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/ximalaya.pdf) | | Yandex Cloud | Public Cloud | Main product | — | — | [Talk in Russian, December 2019](https://www.youtube.com/watch?v=pgnak9e_E0o) | | Yandex DataLens | Business Intelligence | Main product | — | — | [Slides in Russian, December 2019](https://presentations.clickhouse.tech/meetup38/datalens.pdf) | | Yandex Market | e-Commerce | Metrics, Logging | — | — | [Talk in Russian, January 2019](https://youtu.be/_l1qP0DyBcA?t=478) | | Yandex Metrica | Web analytics | Main product | 630 servers in one cluster, 360 servers in another cluster, 1862 servers in one department | 133 PiB / 8.31 PiB / 120 trillion records | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/introduction/#13) | +| Yotascale | Cloud | Data pipeline | — | 2 bn records/day | [LinkedIn (Accomplishments)](https://www.linkedin.com/in/adilsaleem/) | | ЦВТ | Software Development | Metrics, Logging | — | — | [Blog Post, March 2019, in Russian](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) | | МКБ | Bank | Web-system monitoring | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) | | ЦФТ | Banking, Financial products, Payments | — | — | — | [Meetup in Russian, April 2020](https://team.cft.ru/events/162) | +| Цифровой Рабочий | Industrial IoT, Analytics | — | — | — | [Blog post in Russian, March 2021](https://habr.com/en/company/croc/blog/548018/) | | kakaocorp | Internet company | — | — | — | [if(kakao)2020 conference](https://if.kakao.com/session/117) | +| ООО «МПЗ Богородский» | Agriculture | — | — | — | [Article in Russian, November 2020](https://cloud.yandex.ru/cases/okraina) | | Tesla | Electric vehicle and clean energy company | — | — | — | [Vacancy description, March 2021](https://news.ycombinator.com/item?id=26306170) | [Original article](https://clickhouse.tech/docs/en/introduction/adopters/) diff --git a/docs/en/introduction/distinctive-features.md b/docs/en/introduction/distinctive-features.md index be7c2d2e7c1..ee9f9484c67 100644 --- a/docs/en/introduction/distinctive-features.md +++ b/docs/en/introduction/distinctive-features.md @@ -61,7 +61,7 @@ Unlike other database management systems, secondary indexes in ClickHouse does n ## Suitable for Online Queries {#suitable-for-online-queries} -Most OLAP database management systems don’t aim for online queries with sub-second latencies. In alternative systems, report building time of tens of seconds or even minutes is often considered acceptable. Sometimes it takes even more which forces to prepare reports offline (in advance or by responding with “come back later”). 
+Most OLAP database management systems do not aim for online queries with sub-second latencies. In alternative systems, report building time of tens of seconds or even minutes is often considered acceptable. Sometimes it takes even longer, which forces reports to be prepared offline (in advance or by responding with “come back later”).

In ClickHouse low latency means that queries can be processed without delay and without trying to prepare an answer in advance, right at the same moment while the user interface page is loading. In other words, online.
diff --git a/docs/en/operations/access-rights.md b/docs/en/operations/access-rights.md
index 32f8fdcb642..8d48218f417 100644
--- a/docs/en/operations/access-rights.md
+++ b/docs/en/operations/access-rights.md
@@ -31,7 +31,7 @@ To see all users, roles, profiles, etc. and all their grants use [SHOW ACCESS](.

## Usage {#access-control-usage}

-By default, the ClickHouse server provides the `default` user account which is not allowed using SQL-driven access control and account management but has all the rights and permissions. The `default` user account is used in any cases when the username is not defined, for example, at login from client or in distributed queries. In distributed query processing a default user account is used, if the configuration of the server or cluster doesn’t specify the [user and password](../engines/table-engines/special/distributed.md) properties.
+By default, the ClickHouse server provides the `default` user account, which is not allowed to use SQL-driven access control and account management but has all the rights and permissions. The `default` user account is used in any case when the username is not defined, for example, at login from the client or in distributed queries. In distributed query processing, a default user account is used if the configuration of the server or cluster does not specify the [user and password](../engines/table-engines/special/distributed.md) properties.

If you just started using ClickHouse, consider the following scenario:

@@ -101,6 +101,9 @@ Privileges can be granted to a role by the [GRANT](../sql-reference/statements/g

Row policy is a filter that defines which of the rows are available to a user or a role. Row policy contains filters for one particular table, as well as a list of roles and/or users which should use this row policy.

+!!! note "Warning"
+    Row policies make sense only for users with read-only access. If a user can modify a table or copy partitions between tables, it defeats the restrictions of row policies.
+
Management queries:

- [CREATE ROW POLICY](../sql-reference/statements/create/row-policy.md)
diff --git a/docs/en/operations/backup.md b/docs/en/operations/backup.md
index f4206f5d70c..9c8f5389ccd 100644
--- a/docs/en/operations/backup.md
+++ b/docs/en/operations/backup.md
@@ -5,7 +5,7 @@ toc_title: Data Backup

# Data Backup {#data-backup}

-While [replication](../engines/table-engines/mergetree-family/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [you can’t just drop tables with a MergeTree-like engine containing more than 50 Gb of data](server-configuration-parameters/settings.md#max-table-size-to-drop).
However, these safeguards don’t cover all possible cases and can be circumvented.
+While [replication](../engines/table-engines/mergetree-family/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [you can’t just drop tables with a MergeTree-like engine containing more than 50 Gb of data](server-configuration-parameters/settings.md#max-table-size-to-drop). However, these safeguards do not cover all possible cases and can be circumvented.

In order to effectively mitigate possible human errors, you should carefully prepare a strategy for backing up and restoring your data **in advance**.

@@ -30,7 +30,7 @@ For smaller volumes of data, a simple `INSERT INTO ... SELECT ...` to remote tab

## Manipulations with Parts {#manipulations-with-parts}

-ClickHouse allows using the `ALTER TABLE ... FREEZE PARTITION ...` query to create a local copy of table partitions. This is implemented using hardlinks to the `/var/lib/clickhouse/shadow/` folder, so it usually does not consume extra disk space for old data. The created copies of files are not handled by ClickHouse server, so you can just leave them there: you will have a simple backup that doesn’t require any additional external system, but it will still be prone to hardware issues. For this reason, it’s better to remotely copy them to another location and then remove the local copies. Distributed filesystems and object stores are still a good options for this, but normal attached file servers with a large enough capacity might work as well (in this case the transfer will occur via the network filesystem or maybe [rsync](https://en.wikipedia.org/wiki/Rsync)).
+ClickHouse allows using the `ALTER TABLE ... FREEZE PARTITION ...` query to create a local copy of table partitions. This is implemented using hardlinks to the `/var/lib/clickhouse/shadow/` folder, so it usually does not consume extra disk space for old data. The created copies of files are not handled by ClickHouse server, so you can just leave them there: you will have a simple backup that does not require any additional external system, but it will still be prone to hardware issues. For this reason, it’s better to remotely copy them to another location and then remove the local copies. Distributed filesystems and object stores are still good options for this, but normal attached file servers with a large enough capacity might work as well (in this case the transfer will occur via the network filesystem or maybe [rsync](https://en.wikipedia.org/wiki/Rsync)).

Data can be restored from backup using the `ALTER TABLE ... ATTACH PARTITION ...` For more information about queries related to partition manipulations, see the [ALTER documentation](../sql-reference/statements/alter/partition.md#alter_manipulations-with-partitions).
diff --git a/docs/en/operations/configuration-files.md b/docs/en/operations/configuration-files.md
index 9864efd648a..96009c75af1 100644
--- a/docs/en/operations/configuration-files.md
+++ b/docs/en/operations/configuration-files.md
@@ -5,9 +5,9 @@ toc_title: Configuration Files

# Configuration Files {#configuration_files}

-ClickHouse supports multi-file configuration management.
The main server configuration file is `/etc/clickhouse-server/config.xml`. Other files must be in the `/etc/clickhouse-server/config.d` directory.
+ClickHouse supports multi-file configuration management. The main server configuration file is `/etc/clickhouse-server/config.xml` or `/etc/clickhouse-server/config.yaml`. Other files must be in the `/etc/clickhouse-server/config.d` directory. Note that any configuration file can be written either in XML or YAML, but mixing formats in one file is not supported. For example, you can have main configs as `config.xml` and `users.xml` and write additional files in `config.d` and `users.d` directories in `.yaml`.

-All the configuration files should be in XML format. Also, they should have the same root element, usually `<yandex>`.
+All the configuration files should be in XML or YAML formats. All XML files should have the same root element, usually `<yandex>`. As for YAML, `yandex:` should not be present; the parser will insert it automatically.

## Override {#override}

@@ -32,7 +32,7 @@ Users configuration can be splitted into separate files similar to `config.xml`

Directory name is defined as `users_config` setting without `.xml` postfix concatenated with `.d`. Directory `users.d` is used by default, as `users_config` defaults to `users.xml`.

-## Example {#example}
+## XML example {#example}

For example, you can have separate config file for each user like this:

@@ -55,6 +55,70 @@ $ cat /etc/clickhouse-server/users.d/alice.xml
```

+## YAML examples {#example}
+
+Here you can see the default config written in YAML: [config.yaml.example](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config.yaml.example).
+
+There are some differences between YAML and XML formats in terms of ClickHouse configurations. Here are some tips for writing a configuration in YAML format.
+
+You should use a Scalar node to write a key-value pair:
+``` yaml
+key: value
+```
+
+To create a node containing other nodes, you should use a Map:
+``` yaml
+map_key:
+  key1: val1
+  key2: val2
+  key3: val3
+```
+
+To create a list of values or nodes assigned to one tag you should use a Sequence:
+``` yaml
+seq_key:
+  - val1
+  - val2
+  - key1: val3
+  - map:
+      key2: val4
+      key3: val5
+```
+
+If you want to write an attribute for a Sequence or Map node, you should use a `@` prefix before the attribute key. Note that `@` is reserved by the YAML standard, so you should also wrap it in double quotes:
+
+``` yaml
+map:
+  "@attr1": value1
+  "@attr2": value2
+  key: 123
+```
+
+From that Map we will get these XML nodes:
+
+``` xml
+<map attr1="value1" attr2="value2">
+    <key>123</key>
+</map>
+```
+
+You can also set attributes for Sequence:
+
+``` yaml
+seq:
+  - "@attr1": value1
+  - "@attr2": value2
+  - 123
+  - abc
+```
+
+So, we can get YAML config equal to this XML one:
+
+``` xml
+<seq attr1="value1" attr2="value2">123</seq>
+<seq attr1="value1" attr2="value2">abc</seq>
+```
+
## Implementation Details {#implementation-details}

For each config file, the server also generates `file-preprocessed.xml` files when starting. These files contain all the completed substitutions and overrides, and they are intended for informational use. If ZooKeeper substitutions were used in the config files but ZooKeeper is not available on the server start, the server loads the configuration from the preprocessed file.
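As a complement to the implementation details above (an editorial sketch, not part of the original patch): besides inspecting the preprocessed file, overrides that land in user profiles can be verified from SQL. Note that only session-level settings appear in `system.settings`; server-level parameters are visible only in the preprocessed file.

``` sql
-- Show session settings whose values differ from the defaults,
-- e.g. values overridden via users.d fragments:
SELECT name, value
FROM system.settings
WHERE changed;
```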
diff --git a/docs/en/operations/external-authenticators/ldap.md b/docs/en/operations/external-authenticators/ldap.md
index 1b65ecc968b..805d45e1b38 100644
--- a/docs/en/operations/external-authenticators/ldap.md
+++ b/docs/en/operations/external-authenticators/ldap.md
@@ -17,6 +17,7 @@ To define LDAP server you must add `ldap_servers` section to the `config.xml`.
+
        localhost
        636
@@ -31,6 +32,18 @@ To define LDAP server you must add `ldap_servers` section to the `config.xml`.
        /path/to/tls_ca_cert_dir
        ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384
+
+
+
+        localhost
+        389
+        EXAMPLE\{user_name}
+
+            CN=Users,DC=example,DC=com
+            (&(objectClass=user)(sAMAccountName={user_name}))
+
+        no
+
```

@@ -43,6 +56,15 @@ Note, that you can define multiple LDAP servers inside the `ldap_servers` sectio
- `port` — LDAP server port, default is `636` if `enable_tls` is set to `true`, `389` otherwise.
- `bind_dn` — Template used to construct the DN to bind to.
    - The resulting DN will be constructed by replacing all `{user_name}` substrings of the template with the actual user name during each authentication attempt.
+- `user_dn_detection` - Section with LDAP search parameters for detecting the actual user DN of the bound user.
+    - This is mainly used in search filters for further role mapping when the server is Active Directory. The resulting user DN will be used when replacing `{user_dn}` substrings wherever they are allowed. By default, the user DN is set equal to the bind DN, but once the search is performed, it will be updated to the actual detected user DN value.
+    - `base_dn` - Template used to construct the base DN for the LDAP search.
+        - The resulting DN will be constructed by replacing all `{user_name}` and `{bind_dn}` substrings of the template with the actual user name and bind DN during the LDAP search.
+    - `scope` - Scope of the LDAP search.
+        - Accepted values are: `base`, `one_level`, `children`, `subtree` (the default).
+    - `search_filter` - Template used to construct the search filter for the LDAP search.
+        - The resulting filter will be constructed by replacing all `{user_name}`, `{bind_dn}`, and `{base_dn}` substrings of the template with the actual user name, bind DN, and base DN during the LDAP search.
+        - Note that the special characters must be escaped properly in XML.
- `verification_cooldown` — A period of time, in seconds, after a successful bind attempt, during which the user will be assumed to be successfully authenticated for all consecutive requests without contacting the LDAP server.
    - Specify `0` (the default) to disable caching and force contacting the LDAP server for each authentication request.
- `enable_tls` — A flag to trigger the use of the secure connection to the LDAP server.
@@ -107,7 +129,7 @@ Goes into `config.xml`.

-
+
        my_ldap_server
@@ -122,6 +144,18 @@ Goes into `config.xml`.
                clickhouse_
+
+
+
+        my_ad_server
+
+                CN=Users,DC=example,DC=com
+                CN
+                subtree
+                (&(objectClass=group)(member={user_dn}))
+                clickhouse_
+
+
```

@@ -137,13 +171,13 @@ Note that `my_ldap_server` referred in the `ldap` section inside the `user_direc
- When a user authenticates, while still bound to LDAP, an LDAP search is performed using `search_filter` and the name of the logged-in user. For each entry found during that search, the value of the specified attribute is extracted.
For each attribute value that has the specified prefix, the prefix is removed, and the rest of the value becomes the name of a local role defined in ClickHouse, which is expected to be created beforehand by the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement. - There can be multiple `role_mapping` sections defined inside the same `ldap` section. All of them will be applied. - `base_dn` — Template used to construct the base DN for the LDAP search. - - The resulting DN will be constructed by replacing all `{user_name}` and `{bind_dn}` substrings of the template with the actual user name and bind DN during each LDAP search. + - The resulting DN will be constructed by replacing all `{user_name}`, `{bind_dn}`, and `{user_dn}` substrings of the template with the actual user name, bind DN, and user DN during each LDAP search. - `scope` — Scope of the LDAP search. - Accepted values are: `base`, `one_level`, `children`, `subtree` (the default). - `search_filter` — Template used to construct the search filter for the LDAP search. - - The resulting filter will be constructed by replacing all `{user_name}`, `{bind_dn}` and `{base_dn}` substrings of the template with the actual user name, bind DN and base DN during each LDAP search. + - The resulting filter will be constructed by replacing all `{user_name}`, `{bind_dn}`, `{user_dn}`, and `{base_dn}` substrings of the template with the actual user name, bind DN, user DN, and base DN during each LDAP search. - Note, that the special characters must be escaped properly in XML. - - `attribute` — Attribute name whose values will be returned by the LDAP search. + - `attribute` — Attribute name whose values will be returned by the LDAP search. `cn`, by default. - `prefix` — Prefix, that will be expected to be in front of each string in the original list of strings returned by the LDAP search. The prefix will be removed from the original strings and the resulting strings will be treated as local role names. Empty by default. [Original article](https://clickhouse.tech/docs/en/operations/external-authenticators/ldap/) diff --git a/docs/en/operations/optimizing-performance/sampling-query-profiler.md b/docs/en/operations/optimizing-performance/sampling-query-profiler.md index 0c075180530..9244592d515 100644 --- a/docs/en/operations/optimizing-performance/sampling-query-profiler.md +++ b/docs/en/operations/optimizing-performance/sampling-query-profiler.md @@ -11,13 +11,13 @@ To use profiler: - Setup the [trace_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-trace_log) section of the server configuration. - This section configures the [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) system table containing the results of the profiler functioning. It is configured by default. Remember that data in this table is valid only for a running server. After the server restart, ClickHouse doesn’t clean up the table and all the stored virtual memory address may become invalid. + This section configures the [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) system table containing the results of the profiler functioning. It is configured by default. Remember that data in this table is valid only for a running server. After the server restart, ClickHouse does not clean up the table and all the stored virtual memory address may become invalid. 
- Setup the [query_profiler_cpu_time_period_ns](../../operations/settings/settings.md#query_profiler_cpu_time_period_ns) or [query_profiler_real_time_period_ns](../../operations/settings/settings.md#query_profiler_real_time_period_ns) settings. Both settings can be used simultaneously.

    These settings allow you to configure profiler timers. As these are the session settings, you can get different sampling frequency for the whole server, individual users or user profiles, for your interactive session, and for each individual query.

-The default sampling frequency is one sample per second and both CPU and real timers are enabled. This frequency allows collecting enough information about ClickHouse cluster. At the same time, working with this frequency, profiler doesn’t affect ClickHouse server’s performance. If you need to profile each individual query try to use higher sampling frequency.
+The default sampling frequency is one sample per second and both CPU and real timers are enabled. This frequency allows collecting enough information about ClickHouse cluster. At the same time, working with this frequency, profiler does not affect ClickHouse server’s performance. If you need to profile each individual query try to use higher sampling frequency.

To analyze the `trace_log` system table:
diff --git a/docs/en/operations/performance-test.md b/docs/en/operations/performance-test.md
index ca805923ba9..a808ffd0a85 100644
--- a/docs/en/operations/performance-test.md
+++ b/docs/en/operations/performance-test.md
@@ -12,6 +12,7 @@ With this instruction you can run basic ClickHouse performance test on any serve
3. Copy the link to `clickhouse` binary for amd64 or aarch64.
4. ssh to the server and download it with wget:
```bash
+# These links are outdated, please obtain the fresh link from the "commits" page.
# For amd64:
wget https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_build_check/gcc-10_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse
# For aarch64:
diff --git a/docs/en/operations/quotas.md b/docs/en/operations/quotas.md
index 56c3eaf6455..bbea735cdba 100644
--- a/docs/en/operations/quotas.md
+++ b/docs/en/operations/quotas.md
@@ -72,7 +72,7 @@ The resource consumption calculated for each interval is output to the server lo
```

-For the ‘statbox’ quota, restrictions are set for every hour and for every 24 hours (86,400 seconds). The time interval is counted, starting from an implementation-defined fixed moment in time. In other words, the 24-hour interval doesn’t necessarily begin at midnight.
+For the ‘statbox’ quota, restrictions are set for every hour and for every 24 hours (86,400 seconds). The time interval is counted, starting from an implementation-defined fixed moment in time. In other words, the 24-hour interval does not necessarily begin at midnight.

When the interval ends, all collected values are cleared. For the next hour, the quota calculation starts over.
diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md
index 89fcbafe663..801a1d27add 100644
--- a/docs/en/operations/server-configuration-parameters/settings.md
+++ b/docs/en/operations/server-configuration-parameters/settings.md
@@ -100,6 +100,11 @@ Default value: `1073741824` (1 GB).
1073741824
```

+## database_atomic_delay_before_drop_table_sec {#database_atomic_delay_before_drop_table_sec}
+
+Sets the delay, in seconds, before table data is removed.
If the query has the `SYNC` modifier, this setting is ignored.
+
+Default value: `480` (8 minutes).

## default_database {#default-database}

@@ -125,6 +130,25 @@ Settings profiles are located in the file specified in the parameter `user_confi
<default_profile>default</default_profile>
```

+## default_replica_path {#default_replica_path}
+
+The path to the table in ZooKeeper.
+
+**Example**
+
+``` xml
+<default_replica_path>/clickhouse/tables/{uuid}/{shard}</default_replica_path>
+```
+## default_replica_name {#default_replica_name}
+
+The replica name in ZooKeeper.
+
+**Example**
+
+``` xml
+<default_replica_name>{replica}</default_replica_name>
+```
+
## dictionaries_config {#server_configuration_parameters-dictionaries_config}

The path to the config file for external dictionaries.
@@ -321,7 +345,8 @@ Similar to `interserver_http_host`, except that this hostname can be used by oth

The username and password used to authenticate during [replication](../../engines/table-engines/mergetree-family/replication.md) with the Replicated\* engines. These credentials are used only for communication between replicas and are unrelated to credentials for ClickHouse clients. The server is checking these credentials for connecting replicas and use the same credentials when connecting to other replicas. So, these credentials should be set the same for all replicas in a cluster.
By default, the authentication is not used.

-**Note:** These credentials are common for replication through `HTTP` and `HTTPS`.
+!!! note "Note"
+    These credentials are common for replication through `HTTP` and `HTTPS`.

This section contains the following parameters:

@@ -405,7 +430,7 @@ Keys for syslog:
    Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise.
- format – Message format. Possible values: `bsd` and `syslog.`

-## send_crash_reports {#server_configuration_parameters-logger}
+## send_crash_reports {#server_configuration_parameters-send_crash_reports}

Settings for opt-in sending crash reports to the ClickHouse core developers team via [Sentry](https://sentry.io).
Enabling it, especially in pre-production environments, is highly appreciated.

@@ -477,12 +502,12 @@ The default `max_server_memory_usage` value is calculated as `memory_amount * ma

## max_server_memory_usage_to_ram_ratio {#max_server_memory_usage_to_ram_ratio}

-Defines the fraction of total physical RAM amount, available to the Clickhouse server. If the server tries to utilize more, the memory is cut down to the appropriate amount.
+Defines the fraction of the total physical RAM amount available to the ClickHouse server. If the server tries to utilize more, the memory is cut down to the appropriate amount.

Possible values:

- Positive double.
-- 0 — The Clickhouse server can use all available RAM.
+- 0 — The ClickHouse server can use all available RAM.

Default value: `0`.

@@ -502,7 +527,15 @@ On hosts with low RAM and swap, you possibly need setting `max_server_memory_usa

## max_concurrent_queries {#max-concurrent-queries}

-The maximum number of simultaneously processed requests.
+The maximum number of simultaneously processed queries related to MergeTree tables. Queries may be limited by other settings: [max_concurrent_queries_for_all_users](#max-concurrent-queries-for-all-users), [min_marks_to_honor_max_concurrent_queries](#min-marks-to-honor-max-concurrent-queries).
+
+!!! info "Note"
+    These settings can be modified at runtime and will take effect immediately. Queries that are already running will remain unchanged.
+
+Possible values:
+
+- Positive integer.
+- 0 — Disabled.

**Example**

``` xml
<max_concurrent_queries>100</max_concurrent_queries>
```

@@ -530,6 +563,21 @@ Default value: `0` that means no limit.
- [max_concurrent_queries](#max-concurrent-queries)

+## min_marks_to_honor_max_concurrent_queries {#min-marks-to-honor-max-concurrent-queries}
+
+The minimum number of marks read by the query for applying the [max_concurrent_queries](#max-concurrent-queries) setting.
+
+Possible values:
+
+- Positive integer.
+- 0 — Disabled.
+
+**Example**
+
+``` xml
+<min_marks_to_honor_max_concurrent_queries>10</min_marks_to_honor_max_concurrent_queries>
+```
+
## max_connections {#max-connections}

The maximum number of inbound connections.

@@ -778,7 +826,7 @@ Use the following parameters to configure logging:

- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` defined.
- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.

-If the table doesn’t exist, ClickHouse will create it. If the structure of the query log changed when the ClickHouse server was updated, the table with the old structure is renamed, and a new table is created automatically.
+If the table does not exist, ClickHouse will create it. If the structure of the query log changed when the ClickHouse server was updated, the table with the old structure is renamed, and a new table is created automatically.

**Example**

@@ -805,7 +853,7 @@ Use the following parameters to configure logging:

- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` defined.
- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.

-If the table doesn’t exist, ClickHouse will create it. If the structure of the query thread log changed when the ClickHouse server was updated, the table with the old structure is renamed, and a new table is created automatically.
+If the table does not exist, ClickHouse will create it. If the structure of the query thread log changed when the ClickHouse server was updated, the table with the old structure is renamed, and a new table is created automatically.

**Example**

@@ -1107,7 +1155,7 @@ This setting only applies to the `MergeTree` family. It can be specified:

If `use_minimalistic_part_header_in_zookeeper = 1`, then [replicated](../../engines/table-engines/mergetree-family/replication.md) tables store the headers of the data parts compactly using a single `znode`. If the table contains many columns, this storage method significantly reduces the volume of the data stored in Zookeeper.

!!! attention "Attention"
-    After applying `use_minimalistic_part_header_in_zookeeper = 1`, you can’t downgrade the ClickHouse server to a version that doesn’t support this setting. Be careful when upgrading ClickHouse on servers in a cluster. Don’t upgrade all the servers at once. It is safer to test new versions of ClickHouse in a test environment, or on just a few servers of a cluster.
+    After applying `use_minimalistic_part_header_in_zookeeper = 1`, you can’t downgrade the ClickHouse server to a version that does not support this setting. Be careful when upgrading ClickHouse on servers in a cluster. Don’t upgrade all the servers at once. It is safer to test new versions of ClickHouse in a test environment, or on just a few servers of a cluster.

    Data part headers already stored with this setting can't be restored to their previous (non-compact) representation.
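+
+For illustration, a minimal sketch of enabling it globally in the `merge_tree` section of `config.xml` (per the list of placements above; the value is just an example):
+
+``` xml
+<merge_tree>
+    <use_minimalistic_part_header_in_zookeeper>1</use_minimalistic_part_header_in_zookeeper>
+</merge_tree>
+```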
diff --git a/docs/en/operations/settings/merge-tree-settings.md b/docs/en/operations/settings/merge-tree-settings.md
index 77b68715ba9..10ea46098d4 100644
--- a/docs/en/operations/settings/merge-tree-settings.md
+++ b/docs/en/operations/settings/merge-tree-settings.md
@@ -56,6 +56,26 @@ Default value: 150.

ClickHouse artificially executes `INSERT` longer (adds ‘sleep’) so that the background merge process can merge parts faster than they are added.

+## inactive_parts_to_throw_insert {#inactive-parts-to-throw-insert}
+
+If the number of inactive parts in a single partition is more than the `inactive_parts_to_throw_insert` value, `INSERT` is interrupted with the "Too many inactive parts (N). Parts cleaning are processing significantly slower than inserts" exception.
+
+Possible values:
+
+- Any positive integer.
+
+Default value: 0 (unlimited).
+
+## inactive_parts_to_delay_insert {#inactive-parts-to-delay-insert}
+
+If the number of inactive parts in a single partition of the table reaches at least the `inactive_parts_to_delay_insert` value, an `INSERT` is artificially slowed down. It is useful when a server fails to clean up parts quickly enough.
+
+Possible values:
+
+- Any positive integer.
+
+Default value: 0 (unlimited).
+
## max_delay_to_insert {#max-delay-to-insert}

The value in seconds, which is used to calculate the `INSERT` delay, if the number of active parts in a single partition exceeds the [parts_to_delay_insert](#parts-to-delay-insert) value.

@@ -115,6 +135,39 @@ Default value: 604800 (1 week).

Similar to [replicated_deduplication_window](#replicated-deduplication-window), `replicated_deduplication_window_seconds` specifies how long to store hash sums of blocks for insert deduplication. Hash sums older than `replicated_deduplication_window_seconds` are removed from Zookeeper, even if they are less than ` replicated_deduplication_window`.

+## replicated_fetches_http_connection_timeout {#replicated_fetches_http_connection_timeout}
+
+HTTP connection timeout (in seconds) for part fetch requests. Inherited from default profile [http_connection_timeout](./settings.md#http_connection_timeout) if not set explicitly.
+
+Possible values:
+
+- Any positive integer.
+- 0 - Use value of `http_connection_timeout`.
+
+Default value: 0.
+
+## replicated_fetches_http_send_timeout {#replicated_fetches_http_send_timeout}
+
+HTTP send timeout (in seconds) for part fetch requests. Inherited from default profile [http_send_timeout](./settings.md#http_send_timeout) if not set explicitly.
+
+Possible values:
+
+- Any positive integer.
+- 0 - Use value of `http_send_timeout`.
+
+Default value: 0.
+
+## replicated_fetches_http_receive_timeout {#replicated_fetches_http_receive_timeout}
+
+HTTP receive timeout (in seconds) for part fetch requests. Inherited from default profile [http_receive_timeout](./settings.md#http_receive_timeout) if not set explicitly.
+
+Possible values:
+
+- Any positive integer.
+- 0 - Use value of `http_receive_timeout`.
+
+Default value: 0.
+
## old_parts_lifetime {#old-parts-lifetime}

The time (in seconds) of storing inactive parts to protect against data loss during spontaneous server reboots.

@@ -198,4 +251,15 @@ Possible values:

Default value: -1 (unlimited).

+## allow_floating_point_partition_key {#allow_floating_point_partition_key}
+
+Enables the use of a floating-point number as a partition key.
+
+Possible values:
+
+- 0 — Floating-point partition key not allowed.
+- 1 — Floating-point partition key allowed.
+
+Default value: `0`.
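+
+**Example**
+
+A sketch of enabling it for a hypothetical table via the `SETTINGS` clause:
+
+``` sql
+CREATE TABLE float_partition_demo (f Float64, x UInt8)
+ENGINE = MergeTree
+PARTITION BY f
+ORDER BY x
+SETTINGS allow_floating_point_partition_key = 1;
+```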
+ [Original article](https://clickhouse.tech/docs/en/operations/settings/merge_tree_settings/) diff --git a/docs/en/operations/settings/query-complexity.md b/docs/en/operations/settings/query-complexity.md index 2ecf50762d5..d60aa170907 100644 --- a/docs/en/operations/settings/query-complexity.md +++ b/docs/en/operations/settings/query-complexity.md @@ -19,7 +19,7 @@ It can take one of two values: `throw` or `break`. Restrictions on aggregation ( `break` – Stop executing the query and return the partial result, as if the source data ran out. -`any (only for group_by_overflow_mode)` – Continuing aggregation for the keys that got into the set, but don’t add new keys to the set. +`any (only for group_by_overflow_mode)` – Continuing aggregation for the keys that got into the set, but do not add new keys to the set. ## max_memory_usage {#settings_max_memory_usage} @@ -27,7 +27,7 @@ The maximum amount of RAM to use for running a query on a single server. In the default configuration file, the maximum is 10 GB. -The setting doesn’t consider the volume of available memory or the total volume of memory on the machine. +The setting does not consider the volume of available memory or the total volume of memory on the machine. The restriction applies to a single query within a single server. You can use `SHOW PROCESSLIST` to see the current memory consumption for each query. Besides, the peak memory consumption is tracked for each query and written to the log. @@ -288,7 +288,7 @@ Defines what action ClickHouse performs when any of the following join limits is Possible values: - `THROW` — ClickHouse throws an exception and breaks operation. -- `BREAK` — ClickHouse breaks operation and doesn’t throw an exception. +- `BREAK` — ClickHouse breaks operation and does not throw an exception. Default value: `THROW`. diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 89ccee691e4..10461eacbff 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -143,6 +143,16 @@ Possible values: Default value: 0. +## http_max_uri_size {#http-max-uri-size} + +Sets the maximum URI length of an HTTP request. + +Possible values: + +- Positive integer. + +Default value: 1048576. + ## send_progress_in_http_headers {#settings-send_progress_in_http_headers} Enables or disables `X-ClickHouse-Progress` HTTP response headers in `clickhouse-server` responses. @@ -355,13 +365,37 @@ throws an exception. ## input_format_null_as_default {#settings-input-format-null-as-default} -Enables or disables using default values if input data contain `NULL`, but the data type of the corresponding column in not `Nullable(T)` (for text input formats). +Enables or disables the initialization of [NULL](../../sql-reference/syntax.md#null-literal) fields with [default values](../../sql-reference/statements/create/table.md#create-default-values), if data type of these fields is not [nullable](../../sql-reference/data-types/nullable.md#data_type-nullable). +If column type is not nullable and this setting is disabled, then inserting `NULL` causes an exception. If column type is nullable, then `NULL` values are inserted as is, regardless of this setting. + +This setting is applicable to [INSERT ... VALUES](../../sql-reference/statements/insert-into.md) queries for text input formats. + +Possible values: + +- 0 — Inserting `NULL` into a not nullable column causes an exception. +- 1 — `NULL` fields are initialized with default column values. + +Default value: `1`. 
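+
+**Example**
+
+A minimal sketch (hypothetical table) of the enabled behavior for a text input format:
+
+``` sql
+CREATE TABLE null_as_default_demo (x UInt32, s String) ENGINE = Memory;
+
+SET input_format_null_as_default = 1;
+INSERT INTO null_as_default_demo VALUES (NULL, 'a'); -- NULL is initialized with the column default, 0
+
+SELECT * FROM null_as_default_demo;
+```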
+ +## insert_null_as_default {#insert_null_as_default} + +Enables or disables the insertion of [default values](../../sql-reference/statements/create/table.md#create-default-values) instead of [NULL](../../sql-reference/syntax.md#null-literal) into columns with not [nullable](../../sql-reference/data-types/nullable.md#data_type-nullable) data type. +If column type is not nullable and this setting is disabled, then inserting `NULL` causes an exception. If column type is nullable, then `NULL` values are inserted as is, regardless of this setting. + +This setting is applicable to [INSERT ... SELECT](../../sql-reference/statements/insert-into.md#insert_query_insert-select) queries. Note that `SELECT` subqueries may be concatenated with `UNION ALL` clause. + +Possible values: + +- 0 — Inserting `NULL` into a not nullable column causes an exception. +- 1 — Default column value is inserted instead of `NULL`. + +Default value: `1`. ## input_format_skip_unknown_fields {#settings-input-format-skip-unknown-fields} Enables or disables skipping insertion of extra data. -When writing data, ClickHouse throws an exception if input data contain columns that do not exist in the target table. If skipping is enabled, ClickHouse doesn’t insert extra data and doesn’t throw an exception. +When writing data, ClickHouse throws an exception if input data contain columns that do not exist in the target table. If skipping is enabled, ClickHouse does not insert extra data and does not throw an exception. Supported formats: @@ -418,7 +452,7 @@ Default value: 1. Allows choosing a parser of the text representation of date and time. -The setting doesn’t apply to [date and time functions](../../sql-reference/functions/date-time-functions.md). +The setting does not apply to [date and time functions](../../sql-reference/functions/date-time-functions.md). Possible values: @@ -445,15 +479,15 @@ Possible values: - `simple` - Simple output format. - Clickhouse output date and time `YYYY-MM-DD hh:mm:ss` format. For example, `2019-08-20 10:18:56`. The calculation is performed according to the data type's time zone (if present) or server time zone. + ClickHouse output date and time `YYYY-MM-DD hh:mm:ss` format. For example, `2019-08-20 10:18:56`. The calculation is performed according to the data type's time zone (if present) or server time zone. - `iso` - ISO output format. - Clickhouse output date and time in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) `YYYY-MM-DDThh:mm:ssZ` format. For example, `2019-08-20T10:18:56Z`. Note that output is in UTC (`Z` means UTC). + ClickHouse output date and time in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) `YYYY-MM-DDThh:mm:ssZ` format. For example, `2019-08-20T10:18:56Z`. Note that output is in UTC (`Z` means UTC). - `unix_timestamp` - Unix timestamp output format. - Clickhouse output date and time in [Unix timestamp](https://en.wikipedia.org/wiki/Unix_time) format. For example `1566285536`. + ClickHouse output date and time in [Unix timestamp](https://en.wikipedia.org/wiki/Unix_time) format. For example `1566285536`. Default value: `simple`. @@ -652,7 +686,7 @@ Default value: 8. ## merge_tree_max_rows_to_use_cache {#setting-merge-tree-max-rows-to-use-cache} -If ClickHouse should read more than `merge_tree_max_rows_to_use_cache` rows in one query, it doesn’t use the cache of uncompressed blocks. +If ClickHouse should read more than `merge_tree_max_rows_to_use_cache` rows in one query, it does not use the cache of uncompressed blocks. 
The cache of uncompressed blocks stores data extracted for queries. ClickHouse uses this cache to speed up responses to repeated small queries. This setting protects the cache from trashing by queries that read a large amount of data. The [uncompressed_cache_size](../../operations/server-configuration-parameters/settings.md#server-settings-uncompressed_cache_size) server setting defines the size of the cache of uncompressed blocks. @@ -664,7 +698,7 @@ Default value: 128 ✕ 8192. ## merge_tree_max_bytes_to_use_cache {#setting-merge-tree-max-bytes-to-use-cache} -If ClickHouse should read more than `merge_tree_max_bytes_to_use_cache` bytes in one query, it doesn’t use the cache of uncompressed blocks. +If ClickHouse should read more than `merge_tree_max_bytes_to_use_cache` bytes in one query, it does not use the cache of uncompressed blocks. The cache of uncompressed blocks stores data extracted for queries. ClickHouse uses this cache to speed up responses to repeated small queries. This setting protects the cache from trashing by queries that read a large amount of data. The [uncompressed_cache_size](../../operations/server-configuration-parameters/settings.md#server-settings-uncompressed_cache_size) server setting defines the size of the cache of uncompressed blocks. @@ -769,13 +803,45 @@ Example: log_query_threads=1 ``` +## log_comment {#settings-log-comment} + +Specifies the value for the `log_comment` field of the [system.query_log](../system-tables/query_log.md) table and comment text for the server log. + +It can be used to improve the readability of server logs. Additionally, it helps to select queries related to the test from the `system.query_log` after running [clickhouse-test](../../development/tests.md). + +Possible values: + +- Any string no longer than [max_query_size](#settings-max_query_size). If length is exceeded, the server throws an exception. + +Default value: empty string. + +**Example** + +Query: + +``` sql +SET log_comment = 'log_comment test', log_queries = 1; +SELECT 1; +SYSTEM FLUSH LOGS; +SELECT type, query FROM system.query_log WHERE log_comment = 'log_comment test' AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 2; +``` + +Result: + +``` text +┌─type────────┬─query─────┐ +│ QueryStart │ SELECT 1; │ +│ QueryFinish │ SELECT 1; │ +└─────────────┴───────────┘ +``` + ## max_insert_block_size {#settings-max_insert_block_size} The size of blocks (in a count of rows) to form for insertion into a table. This setting only applies in cases when the server forms the blocks. For example, for an INSERT via the HTTP interface, the server parses the data format and forms blocks of the specified size. -But when using clickhouse-client, the client parses the data itself, and the ‘max_insert_block_size’ setting on the server doesn’t affect the size of the inserted blocks. -The setting also doesn’t have a purpose when using INSERT SELECT, since data is inserted using the same blocks that are formed after SELECT. +But when using clickhouse-client, the client parses the data itself, and the ‘max_insert_block_size’ setting on the server does not affect the size of the inserted blocks. +The setting also does not have a purpose when using INSERT SELECT, since data is inserted using the same blocks that are formed after SELECT. Default value: 1,048,576. @@ -822,8 +888,6 @@ For example, when reading from a table, if it is possible to evaluate expression Default value: the number of physical CPU cores. 
-If less than one SELECT query is normally run on a server at a time, set this parameter to a value slightly less than the actual number of processor cores. - For queries that are completed quickly because of a LIMIT, you can set a lower ‘max_threads’. For example, if the necessary number of entries are located in every block and max_threads = 8, then 8 blocks are retrieved, although it would have been enough to read just one. The smaller the `max_threads` value, the less memory is consumed. @@ -847,7 +911,7 @@ Higher values will lead to higher memory usage. The maximum size of blocks of uncompressed data before compressing for writing to a table. By default, 1,048,576 (1 MiB). Specifying smaller block size generally leads to slightly reduced compression ratio, the compression and decompression speed increases slightly due to cache locality, and memory consumption is reduced. !!! note "Warning" - This is an expert-level setting, and you shouldn't change it if you're just getting started with Clickhouse. + This is an expert-level setting, and you shouldn't change it if you're just getting started with ClickHouse. Don’t confuse blocks for compression (a chunk of memory consisting of bytes) with blocks for query processing (a set of rows from a table). @@ -864,7 +928,7 @@ We are writing a UInt32-type column (4 bytes per value). When writing 8192 rows, We are writing a URL column with the String type (average size of 60 bytes per value). When writing 8192 rows, the average will be slightly less than 500 KB of data. Since this is more than 65,536, a compressed block will be formed for each mark. In this case, when reading data from the disk in the range of a single mark, extra data won’t be decompressed. !!! note "Warning" - This is an expert-level setting, and you shouldn't change it if you're just getting started with Clickhouse. + This is an expert-level setting, and you shouldn't change it if you're just getting started with ClickHouse. ## max_query_size {#settings-max_query_size} @@ -978,7 +1042,7 @@ For queries that read at least a somewhat large volume of data (one million rows When using the HTTP interface, the ‘query_id’ parameter can be passed. This is any string that serves as the query identifier. If a query from the same user with the same ‘query_id’ already exists at this time, the behaviour depends on the ‘replace_running_query’ parameter. -`0` (default) – Throw an exception (don’t allow the query to run if a query with the same ‘query_id’ is already running). +`0` (default) – Throw an exception (do not allow the query to run if a query with the same ‘query_id’ is already running). `1` – Cancel the old query and start running the new one. @@ -1037,7 +1101,7 @@ load_balancing = nearest_hostname The number of errors is counted for each replica. Every 5 minutes, the number of errors is integrally divided by 2. Thus, the number of errors is calculated for a recent time with exponential smoothing. If there is one replica with a minimal number of errors (i.e. errors occurred recently on the other replicas), the query is sent to it. If there are multiple replicas with the same minimal number of errors, the query is sent to the replica with a hostname that is most similar to the server’s hostname in the config file (for the number of different characters in identical positions, up to the minimum length of both hostnames). For instance, example01-01-1 and example01-01-2.yandex.ru are different in one position, while example01-01-1 and example01-02-2 differ in two places. 
-This method might seem primitive, but it doesn’t require external data about network topology, and it doesn’t compare IP addresses, which would be complicated for our IPv6 addresses. +This method might seem primitive, but it does not require external data about network topology, and it does not compare IP addresses, which would be complicated for our IPv6 addresses. Thus, if there are equivalent replicas, the closest one by name is preferred. We can also assume that when sending a query to the same server, in the absence of failures, a distributed query will also go to the same servers. So even if different data is placed on the replicas, the query will return mostly the same results. @@ -1109,7 +1173,7 @@ Default value: `1`. This setting is useful for replicated tables with a sampling key. A query may be processed faster if it is executed on several servers in parallel. But the query performance may degrade in the following cases: -- The position of the sampling key in the partitioning key doesn't allow efficient range scans. +- The position of the sampling key in the partitioning key does not allow efficient range scans. - Adding a sampling key to the table makes filtering by other columns less efficient. - The sampling key is an expression that is expensive to calculate. - The cluster latency distribution has a long tail, so that querying more servers increases the query overall latency. @@ -1131,7 +1195,7 @@ For testing, the value can be set to 0: compilation runs synchronously and the q If the value is 1 or more, compilation occurs asynchronously in a separate thread. The result will be used as soon as it is ready, including queries that are currently running. Compiled code is required for each different combination of aggregate functions used in the query and the type of keys in the GROUP BY clause. -The results of the compilation are saved in the build directory in the form of .so files. There is no restriction on the number of compilation results since they don’t use very much space. Old results will be used after server restarts, except in the case of a server upgrade – in this case, the old results are deleted. +The results of the compilation are saved in the build directory in the form of .so files. There is no restriction on the number of compilation results since they do not use very much space. Old results will be used after server restarts, except in the case of a server upgrade – in this case, the old results are deleted. ## output_format_json_quote_64bit_integers {#session_settings-output_format_json_quote_64bit_integers} @@ -1465,7 +1529,7 @@ Possible values: - 1 — skipping enabled. - If a shard is unavailable, ClickHouse returns a result based on partial data and doesn’t report node availability issues. + If a shard is unavailable, ClickHouse returns a result based on partial data and does not report node availability issues. - 0 — skipping disabled. @@ -1480,8 +1544,8 @@ Do not merge aggregation states from different servers for distributed query pro Possible values: - 0 — Disabled (final query processing is done on the initiator node). -- 1 - Do not merge aggregation states from different servers for distributed query processing (query completelly processed on the shard, initiator only proxy the data). -- 2 - Same as 1 but apply `ORDER BY` and `LIMIT` on the initiator (can be used for queries with `ORDER BY` and/or `LIMIT`). 
+- 1 - Do not merge aggregation states from different servers for distributed query processing (the query is completely processed on the shard, the initiator only proxies the data); can be used when it is certain that there are different keys on different shards.
+- 2 - Same as `1` but applies `ORDER BY` and `LIMIT` on the initiator (this is not possible when the query is processed completely on the remote node, as for `distributed_group_by_no_merge=1`); can be used for queries with `ORDER BY` and/or `LIMIT`.

**Example**

@@ -1533,6 +1597,17 @@ Possible values:

Default value: 0

+## optimize_skip_unused_shards_rewrite_in {#optimize-skip-unused-shards-rewrite-in}
+
+Rewrites `IN` in queries sent to remote shards to exclude values that do not belong to the shard (requires `optimize_skip_unused_shards`).
+
+Possible values:
+
+- 0 — Disabled.
+- 1 — Enabled.
+
+Default value: 1 (since it requires `optimize_skip_unused_shards` anyway, which is `0` by default).
+
## allow_nondeterministic_optimize_skip_unused_shards {#allow-nondeterministic-optimize-skip-unused-shards}

Allow nondeterministic (like `rand` or `dictGet`, since later has some caveats with updates) functions in sharding key.

@@ -1562,7 +1637,7 @@ Enables or disables query execution if [optimize_skip_unused_shards](#optimize-s

Possible values:

-- 0 — Disabled. ClickHouse doesn’t throw an exception.
+- 0 — Disabled. ClickHouse does not throw an exception.
- 1 — Enabled. Query execution is disabled only if the table has a sharding key.
- 2 — Enabled. Query execution is disabled regardless of whether a sharding key is defined for the table.

@@ -1707,7 +1782,7 @@ Default value: 0.

Sets the priority ([nice](https://en.wikipedia.org/wiki/Nice_(Unix))) for threads that execute queries. The OS scheduler considers this priority when choosing the next thread to run on each available CPU core.

!!! warning "Warning"
-    To use this setting, you need to set the `CAP_SYS_NICE` capability. The `clickhouse-server` package sets it up during installation. Some virtual environments don’t allow you to set the `CAP_SYS_NICE` capability. In this case, `clickhouse-server` shows a message about it at the start.
+    To use this setting, you need to set the `CAP_SYS_NICE` capability. The `clickhouse-server` package sets it up during installation. Some virtual environments do not allow you to set the `CAP_SYS_NICE` capability. In this case, `clickhouse-server` shows a message about it at the start.

Possible values:

@@ -1882,7 +1957,7 @@ Default value: `0`.

Enables or disables random shard insertion into a [Distributed](../../engines/table-engines/special/distributed.md#distributed) table when there is no distributed key.

-By default, when inserting data into a `Distributed` table with more than one shard, the ClickHouse server will any insertion request if there is no distributed key. When `insert_distributed_one_random_shard = 1`, insertions are allowed and data is forwarded randomly among all shards.
+By default, when inserting data into a `Distributed` table with more than one shard, the ClickHouse server will reject any insertion request if there is no distributed key. When `insert_distributed_one_random_shard = 1`, insertions are allowed and data is forwarded randomly among all shards.

Possible values:

@@ -1983,6 +2058,16 @@ Possible values:

Default value: 16.

+## background_fetches_pool_size {#background_fetches_pool_size}
+
+Sets the number of threads performing background fetches for [replicated](../../engines/table-engines/mergetree-family/replication.md) tables. This setting is applied at the ClickHouse server start and can’t be changed in a user session. For production usage with frequent small insertions or a slow ZooKeeper cluster, it is recommended to use the default value.
+
+Possible values:
+
+- Any positive integer.
+
+Default value: 8.
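+
+**Example**
+
+A sketch of pinning the value for the whole server via the default settings profile (assuming the usual `users.xml` layout; the value is illustrative):
+
+``` xml
+<profiles>
+    <default>
+        <background_fetches_pool_size>8</background_fetches_pool_size>
+    </default>
+</profiles>
+```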
+
## always_fetch_merged_part {#always_fetch_merged_part}

Prohibits data parts merging in [Replicated\*MergeTree](../../engines/table-engines/mergetree-family/replication.md)-engine tables.

@@ -1992,7 +2077,7 @@ When merging is prohibited, the replica never merges parts and always downloads

Possible values:

- 0 — `Replicated*MergeTree`-engine tables merge data parts at the replica.
-- 1 — `Replicated*MergeTree`-engine tables don’t merge data parts at the replica. The tables download merged data parts from other replicas.
+- 1 — `Replicated*MergeTree`-engine tables do not merge data parts at the replica. The tables download merged data parts from other replicas.

Default value: 0.

@@ -2123,7 +2208,7 @@ Allows or restricts using the [LowCardinality](../../sql-reference/data-types/lo

If usage of `LowCardinality` is restricted, ClickHouse server converts `LowCardinality`-columns to ordinary ones for `SELECT` queries, and convert ordinary columns to `LowCardinality`-columns for `INSERT` queries.

-This setting is required mainly for third-party clients which don’t support `LowCardinality` data type.
+This setting is required mainly for third-party clients which do not support `LowCardinality` data type.

Possible values:

@@ -2611,7 +2696,7 @@ Possible values:

- `'DISTINCT'` — ClickHouse outputs rows as a result of combining queries removing duplicate rows.
- `'ALL'` — ClickHouse outputs all rows as a result of combining queries including duplicate rows.
-- `''` — Clickhouse generates an exception when used with `UNION`.
+- `''` — ClickHouse generates an exception when used with `UNION`.

Default value: `''`.

@@ -2755,6 +2840,28 @@ Possible values:

Default value: `0`.

+## database_atomic_wait_for_drop_and_detach_synchronously {#database_atomic_wait_for_drop_and_detach_synchronously}
+
+Adds a modifier `SYNC` to all `DROP` and `DETACH` queries.
+
+Possible values:
+
+- 0 — Queries will be executed with delay.
+- 1 — Queries will be executed without delay.
+
+Default value: `0`.
+
+## show_table_uuid_in_table_create_query_if_not_nil {#show_table_uuid_in_table_create_query_if_not_nil}
+
+Sets the `SHOW CREATE TABLE` query display.
+
+Possible values:
+
+- 0 — The query will be displayed without table UUID.
+- 1 — The query will be displayed with table UUID.
+
+Default value: `0`.
+
## allow_experimental_live_view {#allow-experimental-live-view}

Allows creation of experimental [live views](../../sql-reference/statements/create/view.md#live-view).

@@ -2790,4 +2897,172 @@ Sets the interval in seconds after which periodically refreshed [live view](../.

Default value: `60`.

+## http_connection_timeout {#http_connection_timeout}
+
+HTTP connection timeout (in seconds).
+
+Possible values:
+
+- Any positive integer.
+- 0 - Disabled (infinite timeout).
+
+Default value: 1.
+
+## http_send_timeout {#http_send_timeout}
+
+HTTP send timeout (in seconds).
+
+Possible values:
+
+- Any positive integer.
+- 0 - Disabled (infinite timeout).
+
+Default value: 1800.
+
+## http_receive_timeout {#http_receive_timeout}
+
+HTTP receive timeout (in seconds).
+
+Possible values:
+
+- Any positive integer.
+- 0 - Disabled (infinite timeout).
+
+Default value: 1800.
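+
+For illustration (the values are assumptions), the timeouts can be raised for a session before running a long query over the HTTP interface:
+
+``` sql
+SET http_connection_timeout = 10, http_send_timeout = 600, http_receive_timeout = 600;
+```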
+
+## check_query_single_value_result {#check_query_single_value_result}
+
+Defines the level of detail for the [CHECK TABLE](../../sql-reference/statements/check-table.md#checking-mergetree-tables) query result for `MergeTree` family engines.
+
+Possible values:
+
+- 0 — the query shows a check status for every individual data part of a table.
+- 1 — the query shows the general table check status.
+
+Default value: `0`.
+
+## prefer_column_name_to_alias {#prefer-column-name-to-alias}
+
+Enables or disables using the original column names instead of aliases in query expressions and clauses. It especially matters when an alias is the same as the column name, see [Expression Aliases](../../sql-reference/syntax.md#notes-on-usage). Enable this setting to make alias syntax rules in ClickHouse more compatible with most other database engines.
+
+Possible values:
+
+- 0 — The column name is substituted with the alias.
+- 1 — The column name is not substituted with the alias.
+
+Default value: `0`.
+
+**Example**
+
+The difference between enabled and disabled:
+
+Query:
+
+```sql
+SET prefer_column_name_to_alias = 0;
+SELECT avg(number) AS number, max(number) FROM numbers(10);
+```
+
+Result:
+
+```text
+Received exception from server (version 21.5.1):
+Code: 184. DB::Exception: Received from localhost:9000. DB::Exception: Aggregate function avg(number) is found inside another aggregate function in query: While processing avg(number) AS number.
+```
+
+Query:
+
+```sql
+SET prefer_column_name_to_alias = 1;
+SELECT avg(number) AS number, max(number) FROM numbers(10);
+```
+
+Result:
+
+```text
+┌─number─┬─max(number)─┐
+│ 4.5 │ 9 │
+└────────┴─────────────┘
+```
+
+## limit {#limit}
+
+Sets the maximum number of rows to get from the query result. It adjusts the value set by the [LIMIT](../../sql-reference/statements/select/limit.md#limit-clause) clause, so that the limit specified in the query cannot exceed the limit set by this setting.
+
+Possible values:
+
+- 0 — The number of rows is not limited.
+- Positive integer.
+
+Default value: `0`.
+
+## offset {#offset}
+
+Sets the number of rows to skip before starting to return rows from the query. It adjusts the offset set by the [OFFSET](../../sql-reference/statements/select/offset.md#offset-fetch) clause, so that these two values are summed.
+
+Possible values:
+
+- 0 — No rows are skipped.
+- Positive integer.
+
+Default value: `0`.
+
+**Example**
+
+Input table:
+
+``` sql
+CREATE TABLE test (i UInt64) ENGINE = MergeTree() ORDER BY i;
+INSERT INTO test SELECT number FROM numbers(500);
+```
+
+Query:
+
+``` sql
+SET limit = 5;
+SET offset = 7;
+SELECT * FROM test LIMIT 10 OFFSET 100;
+```
+Result:
+
+``` text
+┌───i─┐
+│ 107 │
+│ 108 │
+│ 109 │
+└─────┘
+```
+
+## optimize_fuse_sum_count_avg {#optimize_fuse_sum_count_avg}
+
+Enables fusing aggregate functions with an identical argument. It rewrites a query that contains at least two aggregate functions from [sum](../../sql-reference/aggregate-functions/reference/sum.md#agg_function-sum), [count](../../sql-reference/aggregate-functions/reference/count.md#agg_function-count) or [avg](../../sql-reference/aggregate-functions/reference/avg.md#agg_function-avg) with an identical argument to [sumCount](../../sql-reference/aggregate-functions/reference/sumcount.md#agg_function-sumCount).
+
+Possible values:
+
+- 0 — Functions with identical argument are not fused.
+- 1 — Functions with identical argument are fused.
+
+Default value: `0`.
+ +**Example** + +Query: + +``` sql +CREATE TABLE fuse_tbl(a Int8, b Int8) Engine = Log; +SET optimize_fuse_sum_count_avg = 1; +EXPLAIN SYNTAX SELECT sum(a), sum(b), count(b), avg(b) from fuse_tbl FORMAT TSV; +``` + +Result: + +``` text +SELECT + sum(a), + sumCount(b).1, + sumCount(b).2, + (sumCount(b).1) / (sumCount(b).2) +FROM fuse_tbl +``` + [Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) diff --git a/docs/en/operations/system-tables/clusters.md b/docs/en/operations/system-tables/clusters.md index cba52586e93..096eca12e7d 100644 --- a/docs/en/operations/system-tables/clusters.md +++ b/docs/en/operations/system-tables/clusters.md @@ -4,63 +4,68 @@ Contains information about clusters available in the config file and the servers Columns: -- `cluster` (String) — The cluster name. -- `shard_num` (UInt32) — The shard number in the cluster, starting from 1. -- `shard_weight` (UInt32) — The relative weight of the shard when writing data. -- `replica_num` (UInt32) — The replica number in the shard, starting from 1. -- `host_name` (String) — The host name, as specified in the config. -- `host_address` (String) — The host IP address obtained from DNS. -- `port` (UInt16) — The port to use for connecting to the server. -- `user` (String) — The name of the user for connecting to the server. -- `errors_count` (UInt32) - number of times this host failed to reach replica. -- `estimated_recovery_time` (UInt32) - seconds left until replica error count is zeroed and it is considered to be back to normal. +- `cluster` ([String](../../sql-reference/data-types/string.md)) — The cluster name. +- `shard_num` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The shard number in the cluster, starting from 1. +- `shard_weight` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The relative weight of the shard when writing data. +- `replica_num` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The replica number in the shard, starting from 1. +- `host_name` ([String](../../sql-reference/data-types/string.md)) — The host name, as specified in the config. +- `host_address` ([String](../../sql-reference/data-types/string.md)) — The host IP address obtained from DNS. +- `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The port to use for connecting to the server. +- `is_local` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Flag that indicates whether the host is local. +- `user` ([String](../../sql-reference/data-types/string.md)) — The name of the user for connecting to the server. +- `default_database` ([String](../../sql-reference/data-types/string.md)) — The default database name. +- `errors_count` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of times this host failed to reach replica. +- `slowdowns_count` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of slowdowns that led to changing replica when establishing a connection with hedged requests. +- `estimated_recovery_time` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Seconds remaining until the replica error count is zeroed and it is considered to be back to normal. -Please note that `errors_count` is updated once per query to the cluster, but `estimated_recovery_time` is recalculated on-demand. So there could be a case of non-zero `errors_count` and zero `estimated_recovery_time`, that next query will zero `errors_count` and try to use replica as if it has no errors. 
+**Example**

-**See also**

+Query:
+
+```sql
+SELECT * FROM system.clusters LIMIT 2 FORMAT Vertical;
+```
+
+Result:
+
+```text
+Row 1:
+──────
+cluster: test_cluster_two_shards
+shard_num: 1
+shard_weight: 1
+replica_num: 1
+host_name: 127.0.0.1
+host_address: 127.0.0.1
+port: 9000
+is_local: 1
+user: default
+default_database:
+errors_count: 0
+slowdowns_count: 0
+estimated_recovery_time: 0
+
+Row 2:
+──────
+cluster: test_cluster_two_shards
+shard_num: 2
+shard_weight: 1
+replica_num: 1
+host_name: 127.0.0.2
+host_address: 127.0.0.2
+port: 9000
+is_local: 0
+user: default
+default_database:
+errors_count: 0
+slowdowns_count: 0
+estimated_recovery_time: 0
+```
+
+**See Also**

- [Table engine Distributed](../../engines/table-engines/special/distributed.md)
- [distributed_replica_error_cap setting](../../operations/settings/settings.md#settings-distributed_replica_error_cap)
- [distributed_replica_error_half_life setting](../../operations/settings/settings.md#settings-distributed_replica_error_half_life)

-**Example**
-
-```sql
-:) SELECT * FROM system.clusters LIMIT 2 FORMAT Vertical;
-```
-
-```text
-Row 1:
-──────
-cluster: test_cluster
-shard_num: 1
-shard_weight: 1
-replica_num: 1
-host_name: clickhouse01
-host_address: 172.23.0.11
-port: 9000
-is_local: 1
-user: default
-default_database:
-errors_count: 0
-estimated_recovery_time: 0
-
-Row 2:
-──────
-cluster: test_cluster
-shard_num: 1
-shard_weight: 1
-replica_num: 2
-host_name: clickhouse02
-host_address: 172.23.0.12
-port: 9000
-is_local: 0
-user: default
-default_database:
-errors_count: 0
-estimated_recovery_time: 0
-
-2 rows in set. Elapsed: 0.002 sec.
-```
-
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/clusters)

diff --git a/docs/en/operations/system-tables/columns.md b/docs/en/operations/system-tables/columns.md
index 92a6315d06b..9160dca9a1a 100644
--- a/docs/en/operations/system-tables/columns.md
+++ b/docs/en/operations/system-tables/columns.md
@@ -4,7 +4,9 @@

Contains information about columns in all the tables.

You can use this table to get information similar to the [DESCRIBE TABLE](../../sql-reference/statements/misc.md#misc-describe-table) query, but for multiple tables at once.

-The `system.columns` table contains the following columns (the column type is shown in brackets):
+Columns from [temporary tables](../../sql-reference/statements/create/table.md#temporary-tables) are visible in the `system.columns` only in those sessions where they have been created. They are shown with the empty `database` field.
+
+Columns:

- `database` ([String](../../sql-reference/data-types/string.md)) — Database name.
- `table` ([String](../../sql-reference/data-types/string.md)) — Table name.

@@ -26,7 +28,7 @@ The `system.columns` table contains the following columns (the column type is sh

**Example**

```sql
-:) select * from system.columns LIMIT 2 FORMAT Vertical;
+SELECT * FROM system.columns LIMIT 2 FORMAT Vertical;
```

```text
@@ -65,8 +67,6 @@ is_in_sorting_key: 0
is_in_primary_key: 0
is_in_sampling_key: 0
compression_codec:
-
-2 rows in set. Elapsed: 0.002 sec.
```

[Original article](https://clickhouse.tech/docs/en/operations/system_tables/columns)

diff --git a/docs/en/operations/system-tables/dictionaries.md b/docs/en/operations/system-tables/dictionaries.md
index 3d3bbe2af4e..2bc1be51f19 100644
--- a/docs/en/operations/system-tables/dictionaries.md
+++ b/docs/en/operations/system-tables/dictionaries.md
@@ -21,6 +21,7 @@ Columns:
- `bytes_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Amount of RAM allocated for the dictionary.
- `query_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of queries since the dictionary was loaded or since the last successful reboot.
- `hit_rate` ([Float64](../../sql-reference/data-types/float.md)) — For cache dictionaries, the percentage of uses for which the value was in the cache.
+- `found_rate` ([Float64](../../sql-reference/data-types/float.md)) — The percentage of uses for which the value was found.
- `element_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of items stored in the dictionary.
- `load_factor` ([Float64](../../sql-reference/data-types/float.md)) — Percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table).
- `source` ([String](../../sql-reference/data-types/string.md)) — Text describing the [data source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) for the dictionary.

@@ -60,4 +61,4 @@ SELECT * FROM system.dictionaries
└──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┘───────────────────────┴────────────────┘
```

-[Original article](https://clickhouse.tech/docs/en/operations/system_tables/dictionaries) \ No newline at end of file
+[Original article](https://clickhouse.tech/docs/en/operations/system_tables/dictionaries)

diff --git a/docs/en/operations/system-tables/distribution_queue.md b/docs/en/operations/system-tables/distribution_queue.md
index fdc6a134da2..3b09c20874c 100644
--- a/docs/en/operations/system-tables/distribution_queue.md
+++ b/docs/en/operations/system-tables/distribution_queue.md
@@ -18,6 +18,10 @@ Columns:

- `data_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Size of compressed data in local files, in bytes.

+- `broken_data_files` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of files that have been marked as broken (due to an error).
+
+- `broken_data_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Size of compressed data in broken files, in bytes.
+
- `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text message about the last error that occurred (if any).

**Example**

diff --git a/docs/en/operations/system-tables/index.md b/docs/en/operations/system-tables/index.md
index e66f082167e..ab3ba25493a 100644
--- a/docs/en/operations/system-tables/index.md
+++ b/docs/en/operations/system-tables/index.md
@@ -59,7 +59,7 @@ For collecting system metrics ClickHouse server uses:

**procfs**

-If ClickHouse server doesn’t have `CAP_NET_ADMIN` capability, it tries to fall back to `ProcfsMetricsProvider`. `ProcfsMetricsProvider` allows collecting per-query system metrics (for CPU and I/O).
+If ClickHouse server does not have `CAP_NET_ADMIN` capability, it tries to fall back to `ProcfsMetricsProvider`. `ProcfsMetricsProvider` allows collecting per-query system metrics (for CPU and I/O). If procfs is supported and enabled on the system, ClickHouse server collects these metrics: diff --git a/docs/en/operations/system-tables/one.md b/docs/en/operations/system-tables/one.md index a85e01bc75a..51316dfbc44 100644 --- a/docs/en/operations/system-tables/one.md +++ b/docs/en/operations/system-tables/one.md @@ -2,7 +2,7 @@ This table contains a single row with a single `dummy` UInt8 column containing the value 0. -This table is used if a `SELECT` query doesn’t specify the `FROM` clause. +This table is used if a `SELECT` query does not specify the `FROM` clause. This is similar to the `DUAL` table found in other DBMSs. diff --git a/docs/en/operations/system-tables/parts.md b/docs/en/operations/system-tables/parts.md index f02d1ebc114..5a4715a4513 100644 --- a/docs/en/operations/system-tables/parts.md +++ b/docs/en/operations/system-tables/parts.md @@ -26,7 +26,7 @@ Columns: - `active` ([UInt8](../../sql-reference/data-types/int-uint.md)) – Flag that indicates whether the data part is active. If a data part is active, it’s used in a table. Otherwise, it’s deleted. Inactive data parts remain after merging. -- `marks` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The number of marks. To get the approximate number of rows in a data part, multiply `marks` by the index granularity (usually 8192) (this hint doesn’t work for adaptive granularity). +- `marks` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The number of marks. To get the approximate number of rows in a data part, multiply `marks` by the index granularity (usually 8192) (this hint does not work for adaptive granularity). - `rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The number of rows. @@ -66,7 +66,7 @@ Columns: - `primary_key_bytes_in_memory_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The amount of memory (in bytes) reserved for primary key values. -- `is_frozen` ([UInt8](../../sql-reference/data-types/int-uint.md)) – Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup doesn’t exist. For more details, see [FREEZE PARTITION](../../sql-reference/statements/alter/partition.md#alter_freeze-partition) +- `is_frozen` ([UInt8](../../sql-reference/data-types/int-uint.md)) – Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup does not exist. For more details, see [FREEZE PARTITION](../../sql-reference/statements/alter/partition.md#alter_freeze-partition) - `database` ([String](../../sql-reference/data-types/string.md)) – Name of the database. diff --git a/docs/en/operations/system-tables/parts_columns.md b/docs/en/operations/system-tables/parts_columns.md index 5c3dd7155f7..293abb18a50 100644 --- a/docs/en/operations/system-tables/parts_columns.md +++ b/docs/en/operations/system-tables/parts_columns.md @@ -26,7 +26,7 @@ Columns: - `active` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Flag that indicates whether the data part is active. If a data part is active, it’s used in a table. Otherwise, it’s deleted. Inactive data parts remain after merging. -- `marks` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The number of marks. 
To get the approximate number of rows in a data part, multiply `marks` by the index granularity (usually 8192) (this hint doesn’t work for adaptive granularity). +- `marks` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The number of marks. To get the approximate number of rows in a data part, multiply `marks` by the index granularity (usually 8192) (this hint does not work for adaptive granularity). - `rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The number of rows. diff --git a/docs/en/operations/system-tables/processes.md b/docs/en/operations/system-tables/processes.md index a379fc4a07a..9ef3c648006 100644 --- a/docs/en/operations/system-tables/processes.md +++ b/docs/en/operations/system-tables/processes.md @@ -11,7 +11,7 @@ Columns: - `bytes_read` (UInt64) – The number of uncompressed bytes read from the table. For distributed processing, on the requestor server, this is the total for all remote servers. - `total_rows_approx` (UInt64) – The approximation of the total number of rows that should be read. For distributed processing, on the requestor server, this is the total for all remote servers. It can be updated during request processing, when new sources to process become known. - `memory_usage` (UInt64) – Amount of RAM the request uses. It might not include some types of dedicated memory. See the [max_memory_usage](../../operations/settings/query-complexity.md#settings_max_memory_usage) setting. -- `query` (String) – The query text. For `INSERT`, it doesn’t include the data to insert. +- `query` (String) – The query text. For `INSERT`, it does not include the data to insert. - `query_id` (String) – Query ID, if defined. diff --git a/docs/en/operations/system-tables/query_log.md b/docs/en/operations/system-tables/query_log.md index 6cf87ee1f17..85f0679fe37 100644 --- a/docs/en/operations/system-tables/query_log.md +++ b/docs/en/operations/system-tables/query_log.md @@ -3,15 +3,15 @@ Contains information about executed queries, for example, start time, duration of processing, error messages. !!! note "Note" - This table doesn’t contain the ingested data for `INSERT` queries. + This table does not contain the ingested data for `INSERT` queries. You can change settings of queries logging in the [query_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query-log) section of the server configuration. -You can disable queries logging by setting [log_queries = 0](../../operations/settings/settings.md#settings-log-queries). We don’t recommend to turn off logging because information in this table is important for solving issues. +You can disable queries logging by setting [log_queries = 0](../../operations/settings/settings.md#settings-log-queries). We do not recommend to turn off logging because information in this table is important for solving issues. The flushing period of data is set in `flush_interval_milliseconds` parameter of the [query_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server settings section. To force flushing, use the [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) query. -ClickHouse doesn’t delete data from the table automatically. See [Introduction](../../operations/system-tables/index.md#system-tables-introduction) for more details. +ClickHouse does not delete data from the table automatically. 
See [Introduction](../../operations/system-tables/index.md#system-tables-introduction) for more details.

The `system.query_log` table registers two kinds of queries:

@@ -37,8 +37,8 @@ Columns:
- `query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Start time of query execution.
- `query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Start time of query execution with microsecond precision.
- `query_duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Duration of query execution in milliseconds.
-- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of rows read from all tables and table functions participated in query. It includes usual subqueries, subqueries for `IN` and `JOIN`. For distributed queries `read_rows` includes the total number of rows read at all replicas. Each replica sends it’s `read_rows` value, and the server-initiator of the query summarizes all received and local values. The cache volumes don’t affect this value.
-- `read_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of bytes read from all tables and table functions participated in query. It includes usual subqueries, subqueries for `IN` and `JOIN`. For distributed queries `read_bytes` includes the total number of rows read at all replicas. Each replica sends it’s `read_bytes` value, and the server-initiator of the query summarizes all received and local values. The cache volumes don’t affect this value.
+- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of rows read from all tables and table functions participated in query. It includes usual subqueries, subqueries for `IN` and `JOIN`. For distributed queries `read_rows` includes the total number of rows read at all replicas. Each replica sends its `read_rows` value, and the server-initiator of the query summarizes all received and local values. The cache volumes do not affect this value.
+- `read_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of bytes read from all tables and table functions participated in query. It includes usual subqueries, subqueries for `IN` and `JOIN`. For distributed queries `read_bytes` includes the total number of bytes read at all replicas. Each replica sends its `read_bytes` value, and the server-initiator of the query summarizes all received and local values. The cache volumes do not affect this value.
- `written_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0.
- `written_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0.
- `result_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of rows in a result of the `SELECT` query, or a number of rows in the `INSERT` query.
diff --git a/docs/en/operations/system-tables/query_thread_log.md b/docs/en/operations/system-tables/query_thread_log.md index 0ae2e7d5d3b..296a33259b3 100644 --- a/docs/en/operations/system-tables/query_thread_log.md +++ b/docs/en/operations/system-tables/query_thread_log.md @@ -9,7 +9,7 @@ To start logging: The flushing period of data is set in `flush_interval_milliseconds` parameter of the [query_thread_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server settings section. To force flushing, use the [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) query. -ClickHouse doesn’t delete data from the table automatically. See [Introduction](../../operations/system-tables/index.md#system-tables-introduction) for more details. +ClickHouse does not delete data from the table automatically. See [Introduction](../../operations/system-tables/index.md#system-tables-introduction) for more details. Columns: diff --git a/docs/en/operations/system-tables/replicas.md b/docs/en/operations/system-tables/replicas.md index 8da68d2d2ab..63a2141e399 100644 --- a/docs/en/operations/system-tables/replicas.md +++ b/docs/en/operations/system-tables/replicas.md @@ -57,7 +57,7 @@ Columns: Note that writes can be performed to any replica that is available and has a session in ZK, regardless of whether it is a leader. - `can_become_leader` (`UInt8`) - Whether the replica can be a leader. - `is_readonly` (`UInt8`) - Whether the replica is in read-only mode. - This mode is turned on if the config doesn’t have sections with ZooKeeper, if an unknown error occurred when reinitializing sessions in ZooKeeper, and during session reinitialization in ZooKeeper. + This mode is turned on if the config does not have sections with ZooKeeper, if an unknown error occurred when reinitializing sessions in ZooKeeper, and during session reinitialization in ZooKeeper. - `is_session_expired` (`UInt8`) - the session with ZooKeeper has expired. Basically the same as `is_readonly`. - `future_parts` (`UInt32`) - The number of data parts that will appear as the result of INSERTs or merges that haven’t been done yet. - `parts_to_check` (`UInt32`) - The number of data parts in the queue for verification. A part is put in the verification queue if there is suspicion that it might be damaged. @@ -84,7 +84,7 @@ The next 4 columns have a non-zero value only where there is an active session w - `active_replicas` (`UInt8`) - The number of replicas of this table that have a session in ZooKeeper (i.e., the number of functioning replicas). If you request all the columns, the table may work a bit slowly, since several reads from ZooKeeper are made for each row. -If you don’t request the last 4 columns (log_max_index, log_pointer, total_replicas, active_replicas), the table works quickly. +If you do not request the last 4 columns (log_max_index, log_pointer, total_replicas, active_replicas), the table works quickly. For example, you can check that everything is working correctly like this: @@ -118,7 +118,7 @@ WHERE OR active_replicas < total_replicas ``` -If this query doesn’t return anything, it means that everything is fine. +If this query does not return anything, it means that everything is fine. 
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/replicas) diff --git a/docs/en/operations/system-tables/replication_queue.md b/docs/en/operations/system-tables/replication_queue.md index d1c74a771c6..965774b81bf 100644 --- a/docs/en/operations/system-tables/replication_queue.md +++ b/docs/en/operations/system-tables/replication_queue.md @@ -14,7 +14,17 @@ Columns: - `node_name` ([String](../../sql-reference/data-types/string.md)) — Node name in ZooKeeper. -- `type` ([String](../../sql-reference/data-types/string.md)) — Type of the task in the queue: `GET_PARTS`, `MERGE_PARTS`, `DETACH_PARTS`, `DROP_PARTS`, or `MUTATE_PARTS`. +- `type` ([String](../../sql-reference/data-types/string.md)) — Type of the task in the queue, one of: + + - `GET_PART` — Get the part from another replica. + - `ATTACH_PART` — Attach the part, possibly from our own replica (if found in the `detached` folder). You may think of it as a `GET_PART` with some optimizations as they're nearly identical. + - `MERGE_PARTS` — Merge the parts. + - `DROP_RANGE` — Delete the parts in the specified partition in the specified number range. + - `CLEAR_COLUMN` — NOTE: Deprecated. Drop specific column from specified partition. + - `CLEAR_INDEX` — NOTE: Deprecated. Drop specific index from specified partition. + - `REPLACE_RANGE` — Drop a certain range of parts and replace them with new ones. + - `MUTATE_PART` — Apply one or several mutations to the part. + - `ALTER_METADATA` — Apply alter modification according to global /metadata and /columns paths. - `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution. @@ -67,7 +77,7 @@ parts_to_merge: ['20201130_121373_121378_1','20201130_121379_121379_0',' is_detach: 0 is_currently_executing: 0 num_tries: 36 -last_exception: Code: 226, e.displayText() = DB::Exception: Marks file '/opt/clickhouse/data/merge/visits_v2/tmp_fetch_20201130_121373_121384_2/CounterID.mrk' doesn't exist (version 20.8.7.15 (official build)) +last_exception: Code: 226, e.displayText() = DB::Exception: Marks file '/opt/clickhouse/data/merge/visits_v2/tmp_fetch_20201130_121373_121384_2/CounterID.mrk' does not exist (version 20.8.7.15 (official build)) last_attempt_time: 2020-12-08 17:35:54 num_postponed: 0 postpone_reason: diff --git a/docs/en/operations/system-tables/stack_trace.md b/docs/en/operations/system-tables/stack_trace.md index 44b13047cc3..eb1824a6f66 100644 --- a/docs/en/operations/system-tables/stack_trace.md +++ b/docs/en/operations/system-tables/stack_trace.md @@ -6,6 +6,7 @@ To analyze stack frames, use the `addressToLine`, `addressToSymbol` and `demangl Columns: +- `thread_name` ([String](../../sql-reference/data-types/string.md)) — Thread name. - `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Thread identifier. - `query_id` ([String](../../sql-reference/data-types/string.md)) — Query identifier that can be used to get details about a query that was running from the [query_log](../system-tables/query_log.md) system table. - `trace` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — A [stack trace](https://en.wikipedia.org/wiki/Stack_trace) which represents a list of physical addresses where the called methods are stored. 
@@ -21,12 +22,14 @@ SET allow_introspection_functions = 1;

Getting symbols from ClickHouse object files:

``` sql
-WITH arrayMap(x -> demangle(addressToSymbol(x)), trace) AS all SELECT thread_id, query_id, arrayStringConcat(all, '\n') AS res FROM system.stack_trace LIMIT 1 \G
+WITH arrayMap(x -> demangle(addressToSymbol(x)), trace) AS all SELECT thread_name, thread_id, query_id, arrayStringConcat(all, '\n') AS res FROM system.stack_trace LIMIT 1 \G;
```

``` text
Row 1:
──────
+thread_name: clickhouse-serv
+
 thread_id: 686
query_id: 1a11f70b-626d-47c1-b948-f9c7b206395d
res: sigqueue
@@ -51,12 +54,14 @@ __clone

Getting filenames and line numbers in ClickHouse source code:

``` sql
-WITH arrayMap(x -> addressToLine(x), trace) AS all, arrayFilter(x -> x LIKE '%/dbms/%', all) AS dbms SELECT thread_id, query_id, arrayStringConcat(notEmpty(dbms) ? dbms : all, '\n') AS res FROM system.stack_trace LIMIT 1 \G
+WITH arrayMap(x -> addressToLine(x), trace) AS all, arrayFilter(x -> x LIKE '%/dbms/%', all) AS dbms SELECT thread_name, thread_id, query_id, arrayStringConcat(notEmpty(dbms) ? dbms : all, '\n') AS res FROM system.stack_trace LIMIT 1 \G;
```

``` text
Row 1:
──────
+thread_name: clickhouse-serv
+
 thread_id: 686
query_id: cad353e7-1c29-4b2e-949f-93e597ab7a54
res: /lib/x86_64-linux-gnu/libc-2.27.so
@@ -84,6 +89,3 @@ res: /lib/x86_64-linux-gnu/libc-2.27.so
- [system.trace_log](../system-tables/trace_log.md) — Contains stack traces collected by the sampling query profiler.
- [arrayMap](../../sql-reference/functions/array-functions.md#array-map) — Description and usage example of the `arrayMap` function.
- [arrayFilter](../../sql-reference/functions/array-functions.md#array-filter) — Description and usage example of the `arrayFilter` function.
-
-
-[Original article](https://clickhouse.tech/docs/en/operations/system-tables/stack_trace)
diff --git a/docs/en/operations/system-tables/tables.md b/docs/en/operations/system-tables/tables.md
index 6ad1425e032..480db3087f6 100644
--- a/docs/en/operations/system-tables/tables.md
+++ b/docs/en/operations/system-tables/tables.md
@@ -1,107 +1,120 @@
# system.tables {#system-tables}

-Contains metadata of each table that the server knows about. Detached tables are not shown in `system.tables`.
+Contains metadata of each table that the server knows about.

-This table contains the following columns (the column type is shown in brackets):
+[Detached](../../sql-reference/statements/detach.md) tables are not shown in `system.tables`.

-- `database` (String) — The name of the database the table is in.
+[Temporary tables](../../sql-reference/statements/create/table.md#temporary-tables) are visible in `system.tables` only in those sessions where they have been created. They are shown with an empty `database` field and with the `is_temporary` flag switched on.

-- `name` (String) — Table name.
+Columns:

-- `engine` (String) — Table engine name (without parameters).
+- `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database the table is in.

-- `is_temporary` (UInt8) - Flag that indicates whether the table is temporary.
+- `name` ([String](../../sql-reference/data-types/string.md)) — Table name.

-- `data_path` (String) - Path to the table data in the file system.
+- `engine` ([String](../../sql-reference/data-types/string.md)) — Table engine name (without parameters).

-- `metadata_path` (String) - Path to the table metadata in the file system.
+- `is_temporary` ([UInt8](../../sql-reference/data-types/int-uint.md)) - Flag that indicates whether the table is temporary.

-- `metadata_modification_time` (DateTime) - Time of latest modification of the table metadata.
+- `data_path` ([String](../../sql-reference/data-types/string.md)) - Path to the table data in the file system.

-- `dependencies_database` (Array(String)) - Database dependencies.
+- `metadata_path` ([String](../../sql-reference/data-types/string.md)) - Path to the table metadata in the file system.

-- `dependencies_table` (Array(String)) - Table dependencies ([MaterializedView](../../engines/table-engines/special/materializedview.md) tables based on the current table).
+- `metadata_modification_time` ([DateTime](../../sql-reference/data-types/datetime.md)) - Time of latest modification of the table metadata.

-- `create_table_query` (String) - The query that was used to create the table.
+- `dependencies_database` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - Database dependencies.

-- `engine_full` (String) - Parameters of the table engine.
+- `dependencies_table` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - Table dependencies ([MaterializedView](../../engines/table-engines/special/materializedview.md) tables based on the current table).

-- `partition_key` (String) - The partition key expression specified in the table.
+- `create_table_query` ([String](../../sql-reference/data-types/string.md)) - The query that was used to create the table.

-- `sorting_key` (String) - The sorting key expression specified in the table.
+- `engine_full` ([String](../../sql-reference/data-types/string.md)) - Parameters of the table engine.

-- `primary_key` (String) - The primary key expression specified in the table.
+- `partition_key` ([String](../../sql-reference/data-types/string.md)) - The partition key expression specified in the table.

-- `sampling_key` (String) - The sampling key expression specified in the table.
+- `sorting_key` ([String](../../sql-reference/data-types/string.md)) - The sorting key expression specified in the table.

-- `storage_policy` (String) - The storage policy:
+- `primary_key` ([String](../../sql-reference/data-types/string.md)) - The primary key expression specified in the table.
+
+- `sampling_key` ([String](../../sql-reference/data-types/string.md)) - The sampling key expression specified in the table.
+
+- `storage_policy` ([String](../../sql-reference/data-types/string.md)) - The storage policy:

 - [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes)
 - [Distributed](../../engines/table-engines/special/distributed.md#distributed)

-- `total_rows` (Nullable(UInt64)) - Total number of rows, if it is possible to quickly determine exact number of rows in the table, otherwise `Null` (including underying `Buffer` table).
+- `total_rows` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of rows, if it is possible to quickly determine exact number of rows in the table, otherwise `NULL` (including underlying `Buffer` table).

-- `total_bytes` (Nullable(UInt64)) - Total number of bytes, if it is possible to quickly determine exact number of bytes for the table on storage, otherwise `Null` (**does not** includes any underlying storage).
+- `total_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of bytes, if it is possible to quickly determine exact number of bytes for the table on storage, otherwise `NULL` (does not include any underlying storage).

 - If the table stores data on disk, returns used space on disk (i.e. compressed).
 - If the table stores data in memory, returns approximated number of used bytes in memory.

-- `lifetime_rows` (Nullable(UInt64)) - Total number of rows INSERTed since server start (only for `Buffer` tables).
+- `lifetime_rows` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of rows INSERTed since server start (only for `Buffer` tables).
+
+- `lifetime_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of bytes INSERTed since server start (only for `Buffer` tables).
+
+- `comment` ([String](../../sql-reference/data-types/string.md)) - The comment for the table.

-- `lifetime_bytes` (Nullable(UInt64)) - Total number of bytes INSERTed since server start (only for `Buffer` tables).

The `system.tables` table is used in `SHOW TABLES` query implementation.

+**Example**
+
```sql
-:) SELECT * FROM system.tables LIMIT 2 FORMAT Vertical;
+SELECT * FROM system.tables LIMIT 2 FORMAT Vertical;
```

```text
Row 1:
──────
-database: system
-name: aggregate_function_combinators
-uuid: 00000000-0000-0000-0000-000000000000
-engine: SystemAggregateFunctionCombinators
+database: base
+name: t1
+uuid: 81b1c20a-b7c6-4116-a2ce-7583fb6b6736
+engine: MergeTree
is_temporary: 0
-data_paths: []
-metadata_path: /var/lib/clickhouse/metadata/system/aggregate_function_combinators.sql
-metadata_modification_time: 1970-01-01 03:00:00
+data_paths: ['/var/lib/clickhouse/store/81b/81b1c20a-b7c6-4116-a2ce-7583fb6b6736/']
+metadata_path: /var/lib/clickhouse/store/461/461cf698-fd0b-406d-8c01-5d8fd5748a91/t1.sql
+metadata_modification_time: 2021-01-25 19:14:32
dependencies_database: []
dependencies_table: []
-create_table_query:
-engine_full:
-partition_key:
-sorting_key:
-primary_key:
-sampling_key:
-storage_policy:
-total_rows: ᴺᵁᴸᴸ
-total_bytes: ᴺᵁᴸᴸ
+create_table_query: CREATE TABLE base.t1 (`n` UInt64) ENGINE = MergeTree ORDER BY n SETTINGS index_granularity = 8192
+engine_full: MergeTree ORDER BY n SETTINGS index_granularity = 8192
+partition_key:
+sorting_key: n
+primary_key: n
+sampling_key:
+storage_policy: default
+total_rows: 1
+total_bytes: 99
+lifetime_rows: ᴺᵁᴸᴸ
+lifetime_bytes: ᴺᵁᴸᴸ
+comment:

Row 2:
──────
-database: system
-name: asynchronous_metrics
+database: default
+name: 53r93yleapyears
uuid: 00000000-0000-0000-0000-000000000000
-engine: SystemAsynchronousMetrics
+engine: MergeTree
is_temporary: 0
-data_paths: []
-metadata_path: /var/lib/clickhouse/metadata/system/asynchronous_metrics.sql
-metadata_modification_time: 1970-01-01 03:00:00
+data_paths: ['/var/lib/clickhouse/data/default/53r93yleapyears/']
+metadata_path: /var/lib/clickhouse/metadata/default/53r93yleapyears.sql
+metadata_modification_time: 2020-09-23 09:05:36
dependencies_database: []
dependencies_table: []
-create_table_query:
-engine_full:
-partition_key:
-sorting_key:
-primary_key:
-sampling_key:
-storage_policy:
-total_rows: ᴺᵁᴸᴸ
-total_bytes: ᴺᵁᴸᴸ
-
-2 rows in set. Elapsed: 0.004 sec.
+create_table_query: CREATE TABLE default.`53r93yleapyears` (`id` Int8, `febdays` Int8) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192
+engine_full: MergeTree ORDER BY id SETTINGS index_granularity = 8192
+partition_key:
+sorting_key: id
+primary_key: id
+sampling_key:
+storage_policy: default
+total_rows: 2
+total_bytes: 155
+lifetime_rows: ᴺᵁᴸᴸ
+lifetime_bytes: ᴺᵁᴸᴸ
+comment:
```

[Original article](https://clickhouse.tech/docs/en/operations/system_tables/tables)
diff --git a/docs/en/operations/system-tables/trace_log.md b/docs/en/operations/system-tables/trace_log.md
index b3b04795a60..e4c01a65d9d 100644
--- a/docs/en/operations/system-tables/trace_log.md
+++ b/docs/en/operations/system-tables/trace_log.md
@@ -20,10 +20,12 @@ Columns:

 When connecting to the server by `clickhouse-client`, you see the string similar to `Connected to ClickHouse server version 19.18.1 revision 54429.`. This field contains the `revision`, but not the `version` of a server.

-- `timer_type` ([Enum8](../../sql-reference/data-types/enum.md)) — Timer type:
+- `trace_type` ([Enum8](../../sql-reference/data-types/enum.md)) — Trace type:

-    - `Real` represents wall-clock time.
-    - `CPU` represents CPU time.
+    - `Real` represents collecting stack traces by wall-clock time.
+    - `CPU` represents collecting stack traces by CPU time.
+    - `Memory` represents collecting allocations and deallocations when memory allocation exceeds the subsequent watermark.
+    - `MemorySample` represents collecting random allocations and deallocations.

- `thread_number` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Thread identifier.
diff --git a/docs/en/operations/system-tables/zookeeper.md b/docs/en/operations/system-tables/zookeeper.md
index 82ace5e81dc..3b8db14934e 100644
--- a/docs/en/operations/system-tables/zookeeper.md
+++ b/docs/en/operations/system-tables/zookeeper.md
@@ -5,10 +5,10 @@ The query must either have a ‘path =’ condition or a `path IN` condition
 The query `SELECT * FROM system.zookeeper WHERE path = '/clickhouse'` outputs data for all children on the `/clickhouse` node.
 To output data for all root nodes, write path = ‘/’.
-If the path specified in ‘path’ doesn’t exist, an exception will be thrown.
+If the path specified in ‘path’ does not exist, an exception will be thrown.

 The query `SELECT * FROM system.zookeeper WHERE path IN ('/', '/clickhouse')` outputs data for all children on the `/` and `/clickhouse` node.
-If in the specified ‘path’ collection has doesn't exist path, an exception will be thrown.
+If the specified ‘path’ collection contains a path that does not exist, an exception will be thrown.
 It can be used to do a batch of ZooKeeper path queries.

Columns:
diff --git a/docs/en/operations/tips.md b/docs/en/operations/tips.md
index e62dea0b04e..0b74ae95b06 100644
--- a/docs/en/operations/tips.md
+++ b/docs/en/operations/tips.md
@@ -52,7 +52,7 @@ But for storing archives with rare queries, shelves will work.
## RAID {#raid}

 When using HDD, you can combine their RAID-10, RAID-5, RAID-6 or RAID-50.
-For Linux, software RAID is better (with `mdadm`). We don’t recommend using LVM.
+For Linux, software RAID is better (with `mdadm`). We do not recommend using LVM.
 When creating RAID-10, select the `far` layout.
 If your budget allows, choose RAID-10.

@@ -191,8 +191,9 @@ dynamicConfigFile=/etc/zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }}/conf/zoo.
Java version: ``` text -Java(TM) SE Runtime Environment (build 1.8.0_25-b17) -Java HotSpot(TM) 64-Bit Server VM (build 25.25-b02, mixed mode) +openjdk 11.0.5-shenandoah 2019-10-15 +OpenJDK Runtime Environment (build 11.0.5-shenandoah+10-adhoc.heretic.src) +OpenJDK 64-Bit Server VM (build 11.0.5-shenandoah+10-adhoc.heretic.src, mixed mode) ``` JVM parameters: @@ -204,7 +205,7 @@ ZOOCFGDIR=/etc/$NAME/conf # TODO this is really ugly # How to find out, which jars are needed? # seems, that log4j requires the log4j.properties file to be in the classpath -CLASSPATH="$ZOOCFGDIR:/usr/build/classes:/usr/build/lib/*.jar:/usr/share/zookeeper/zookeeper-3.5.1-metrika.jar:/usr/share/zookeeper/slf4j-log4j12-1.7.5.jar:/usr/share/zookeeper/slf4j-api-1.7.5.jar:/usr/share/zookeeper/servlet-api-2.5-20081211.jar:/usr/share/zookeeper/netty-3.7.0.Final.jar:/usr/share/zookeeper/log4j-1.2.16.jar:/usr/share/zookeeper/jline-2.11.jar:/usr/share/zookeeper/jetty-util-6.1.26.jar:/usr/share/zookeeper/jetty-6.1.26.jar:/usr/share/zookeeper/javacc.jar:/usr/share/zookeeper/jackson-mapper-asl-1.9.11.jar:/usr/share/zookeeper/jackson-core-asl-1.9.11.jar:/usr/share/zookeeper/commons-cli-1.2.jar:/usr/src/java/lib/*.jar:/usr/etc/zookeeper" +CLASSPATH="$ZOOCFGDIR:/usr/build/classes:/usr/build/lib/*.jar:/usr/share/zookeeper-3.6.2/lib/audience-annotations-0.5.0.jar:/usr/share/zookeeper-3.6.2/lib/commons-cli-1.2.jar:/usr/share/zookeeper-3.6.2/lib/commons-lang-2.6.jar:/usr/share/zookeeper-3.6.2/lib/jackson-annotations-2.10.3.jar:/usr/share/zookeeper-3.6.2/lib/jackson-core-2.10.3.jar:/usr/share/zookeeper-3.6.2/lib/jackson-databind-2.10.3.jar:/usr/share/zookeeper-3.6.2/lib/javax.servlet-api-3.1.0.jar:/usr/share/zookeeper-3.6.2/lib/jetty-http-9.4.24.v20191120.jar:/usr/share/zookeeper-3.6.2/lib/jetty-io-9.4.24.v20191120.jar:/usr/share/zookeeper-3.6.2/lib/jetty-security-9.4.24.v20191120.jar:/usr/share/zookeeper-3.6.2/lib/jetty-server-9.4.24.v20191120.jar:/usr/share/zookeeper-3.6.2/lib/jetty-servlet-9.4.24.v20191120.jar:/usr/share/zookeeper-3.6.2/lib/jetty-util-9.4.24.v20191120.jar:/usr/share/zookeeper-3.6.2/lib/jline-2.14.6.jar:/usr/share/zookeeper-3.6.2/lib/json-simple-1.1.1.jar:/usr/share/zookeeper-3.6.2/lib/log4j-1.2.17.jar:/usr/share/zookeeper-3.6.2/lib/metrics-core-3.2.5.jar:/usr/share/zookeeper-3.6.2/lib/netty-buffer-4.1.50.Final.jar:/usr/share/zookeeper-3.6.2/lib/netty-codec-4.1.50.Final.jar:/usr/share/zookeeper-3.6.2/lib/netty-common-4.1.50.Final.jar:/usr/share/zookeeper-3.6.2/lib/netty-handler-4.1.50.Final.jar:/usr/share/zookeeper-3.6.2/lib/netty-resolver-4.1.50.Final.jar:/usr/share/zookeeper-3.6.2/lib/netty-transport-4.1.50.Final.jar:/usr/share/zookeeper-3.6.2/lib/netty-transport-native-epoll-4.1.50.Final.jar:/usr/share/zookeeper-3.6.2/lib/netty-transport-native-unix-common-4.1.50.Final.jar:/usr/share/zookeeper-3.6.2/lib/simpleclient-0.6.0.jar:/usr/share/zookeeper-3.6.2/lib/simpleclient_common-0.6.0.jar:/usr/share/zookeeper-3.6.2/lib/simpleclient_hotspot-0.6.0.jar:/usr/share/zookeeper-3.6.2/lib/simpleclient_servlet-0.6.0.jar:/usr/share/zookeeper-3.6.2/lib/slf4j-api-1.7.25.jar:/usr/share/zookeeper-3.6.2/lib/slf4j-log4j12-1.7.25.jar:/usr/share/zookeeper-3.6.2/lib/snappy-java-1.1.7.jar:/usr/share/zookeeper-3.6.2/lib/zookeeper-3.6.2.jar:/usr/share/zookeeper-3.6.2/lib/zookeeper-jute-3.6.2.jar:/usr/share/zookeeper-3.6.2/lib/zookeeper-prometheus-metrics-3.6.2.jar:/usr/share/zookeeper-3.6.2/etc" ZOOCFG="$ZOOCFGDIR/zoo.cfg" ZOO_LOG_DIR=/var/log/$NAME @@ -213,27 +214,17 @@ GROUP=zookeeper PIDDIR=/var/run/$NAME PIDFILE=$PIDDIR/$NAME.pid 
SCRIPTNAME=/etc/init.d/$NAME
-JAVA=/usr/bin/java
+JAVA=/usr/local/jdk-11/bin/java
ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
ZOO_LOG4J_PROP="INFO,ROLLINGFILE"
JMXLOCALONLY=false
JAVA_OPTS="-Xms{{ '{{' }} cluster.get('xms','128M') {{ '}}' }} \
    -Xmx{{ '{{' }} cluster.get('xmx','1G') {{ '}}' }} \
-    -Xloggc:/var/log/$NAME/zookeeper-gc.log \
-    -XX:+UseGCLogFileRotation \
-    -XX:NumberOfGCLogFiles=16 \
-    -XX:GCLogFileSize=16M \
+    -Xlog:safepoint,gc*=info,age*=debug:file=/var/log/$NAME/zookeeper-gc.log:time,level,tags:filecount=16,filesize=16M
    -verbose:gc \
-    -XX:+PrintGCTimeStamps \
-    -XX:+PrintGCDateStamps \
-    -XX:+PrintGCDetails
-    -XX:+PrintTenuringDistribution \
-    -XX:+PrintGCApplicationStoppedTime \
-    -XX:+PrintGCApplicationConcurrentTime \
-    -XX:+PrintSafepointStatistics \
-    -XX:+UseParNewGC \
-    -XX:+UseConcMarkSweepGC \
--XX:+CMSParallelRemarkEnabled"
+    -XX:+UseG1GC \
+    -Djute.maxbuffer=8388608 \
+    -XX:MaxGCPauseMillis=50"
```

Salt init:
diff --git a/docs/en/operations/troubleshooting.md b/docs/en/operations/troubleshooting.md
index 39449afccef..f2695ce8437 100644
--- a/docs/en/operations/troubleshooting.md
+++ b/docs/en/operations/troubleshooting.md
@@ -55,7 +55,7 @@ If `clickhouse-server` start failed with a configuration error, you should see t
 2019.01.11 15:23:25.549505 [ 45 ] {} ExternalDictionaries: Failed reloading 'event2id' external dictionary: Poco::Exception. Code: 1000, e.code() = 111, e.displayText() = Connection refused, e.what() = Connection refused
```

-If you don’t see an error at the end of the file, look through the entire file starting from the string:
+If you do not see an error at the end of the file, look through the entire file starting from the string:

``` text
Application: starting up.
```

@@ -79,7 +79,7 @@ Revision: 54413

**See system.d logs**

-If you don’t find any useful information in `clickhouse-server` logs or there aren’t any logs, you can view `system.d` logs using the command:
+If you do not find any useful information in `clickhouse-server` logs or there aren’t any logs, you can view `system.d` logs using the command:

``` bash
$ sudo journalctl -u clickhouse-server
diff --git a/docs/en/operations/update.md b/docs/en/operations/update.md
index 9fa9c44e130..dbcf9ae2b3e 100644
--- a/docs/en/operations/update.md
+++ b/docs/en/operations/update.md
@@ -15,7 +15,8 @@ $ sudo service clickhouse-server restart

 If you installed ClickHouse using something other than the recommended `deb` packages, use the appropriate update method.

-ClickHouse does not support a distributed update. The operation should be performed consecutively on each separate server. Do not update all the servers on a cluster simultaneously, or the cluster will be unavailable for some time.
+!!! note "Note"
+    You can update multiple servers at once, as long as there is no moment when all replicas of one shard are offline.

 The upgrade of older version of ClickHouse to specific version:

@@ -28,7 +29,3 @@ $ sudo apt-get update
 $ sudo apt-get install clickhouse-server=xx.yy.a.b clickhouse-client=xx.yy.a.b clickhouse-common-static=xx.yy.a.b
 $ sudo service clickhouse-server restart
```
-
-
-
-
diff --git a/docs/en/operations/utilities/clickhouse-format.md b/docs/en/operations/utilities/clickhouse-format.md
new file mode 100644
index 00000000000..17948dce82d
--- /dev/null
+++ b/docs/en/operations/utilities/clickhouse-format.md
@@ -0,0 +1,98 @@
+---
+toc_priority: 65
+toc_title: clickhouse-format
+---
+
+# clickhouse-format {#clickhouse-format}
+
+Allows formatting input queries.
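Before the individual keys, a minimal invocation with no keys at all may help for orientation: `clickhouse-format` reads a query from stdin and pretty-prints it (illustrative output; the exact formatting may vary by version):

```bash
$ clickhouse-format <<< "select number from numbers(3) where number > 0"
```

Result:

```sql
SELECT number
FROM numbers(3)
WHERE number > 0
```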
+
+Keys:
+
+- `--help` or `-h` — Produce help message.
+- `--hilite` — Add syntax highlight with ANSI terminal escape sequences.
+- `--oneline` — Format in single line.
+- `--quiet` or `-q` — Just check syntax, no output on success.
+- `--multiquery` or `-n` — Allow multiple queries in the same file.
+- `--obfuscate` — Obfuscate instead of formatting.
+- `--seed <string>` — Arbitrary seed string that determines the result of obfuscation.
+- `--backslash` — Add a backslash at the end of each line of the formatted query. Can be useful when you copy a multiline query from the web or somewhere else and want to execute it in the command line.
+
+## Examples {#examples}
+
+1. Highlighting and single line:
+
+```bash
+$ clickhouse-format --oneline --hilite <<< "SELECT sum(number) FROM numbers(5);"
+```
+
+Result:
+
+```sql
+SELECT sum(number) FROM numbers(5)
+```
+
+2. Multiqueries:
+
+```bash
+$ clickhouse-format -n <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);"
+```
+
+Result:
+
+```text
+SELECT *
+FROM
+(
+    SELECT 1 AS x
+    UNION ALL
+    SELECT 1
+    UNION DISTINCT
+    SELECT 3
+)
+;
+```
+
+3. Obfuscating:
+
+```bash
+$ clickhouse-format --seed Hello --obfuscate <<< "SELECT cost_first_screen BETWEEN a AND b, CASE WHEN x >= 123 THEN y ELSE NULL END;"
+```
+
+Result:
+
+```text
+SELECT treasury_mammoth_hazelnut BETWEEN nutmeg AND span, CASE WHEN chive >= 116 THEN switching ELSE ANYTHING END;
+```
+
+Same query and another seed string:
+
+```bash
+$ clickhouse-format --seed World --obfuscate <<< "SELECT cost_first_screen BETWEEN a AND b, CASE WHEN x >= 123 THEN y ELSE NULL END;"
+```
+
+Result:
+
+```text
+SELECT horse_tape_summer BETWEEN folklore AND moccasins, CASE WHEN intestine >= 116 THEN nonconformist ELSE FORESTRY END;
+```
+
+4. Adding backslash:
+
+```bash
+$ clickhouse-format --backslash <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);"
+```
+
+Result:
+
+```text
+SELECT * \
+FROM \
+( \
+    SELECT 1 AS x \
+    UNION ALL \
+    SELECT 1 \
+    UNION DISTINCT \
+    SELECT 3 \
+)
+```
diff --git a/docs/en/operations/utilities/clickhouse-obfuscator.md b/docs/en/operations/utilities/clickhouse-obfuscator.md
index 7fd608fcac0..b01a7624b56 100644
--- a/docs/en/operations/utilities/clickhouse-obfuscator.md
+++ b/docs/en/operations/utilities/clickhouse-obfuscator.md
@@ -1,42 +1,42 @@
-# ClickHouse obfuscator
-
-A simple tool for table data obfuscation.
-
-It reads an input table and produces an output table, that retains some properties of input, but contains different data.
-It allows publishing almost real production data for usage in benchmarks.
-
-It is designed to retain the following properties of data:
-- cardinalities of values (number of distinct values) for every column and every tuple of columns;
-- conditional cardinalities: number of distinct values of one column under the condition on the value of another column;
-- probability distributions of the absolute value of integers; the sign of signed integers; exponent and sign for floats;
-- probability distributions of the length of strings;
-- probability of zero values of numbers; empty strings and arrays, `NULL`s;
-
-- data compression ratio when compressed with LZ77 and entropy family of codecs;
-- continuity (magnitude of difference) of time values across the table; continuity of floating-point values;
-- date component of `DateTime` values;
-
-- UTF-8 validity of string values;
-- string values look natural.
-
-Most of the properties above are viable for performance testing:
-
-reading data, filtering, aggregatio, and sorting will work at almost the same speed
-as on original data due to saved cardinalities, magnitudes, compression ratios, etc.
-
-It works in a deterministic fashion: you define a seed value and the transformation is determined by input data and by seed.
-Some transformations are one to one and could be reversed, so you need to have a large seed and keep it in secret.
-
-It uses some cryptographic primitives to transform data but from the cryptographic point of view, it doesn't do it properly, that is why you should not consider the result as secure unless you have another reason. The result may retain some data you don't want to publish.
-
-
-It always leaves 0, 1, -1 numbers, dates, lengths of arrays, and null flags exactly as in source data.
-For example, you have a column `IsMobile` in your table with values 0 and 1. In transformed data, it will have the same value.
-
-So, the user will be able to count the exact ratio of mobile traffic.
-
-Let's give another example. When you have some private data in your table, like user email and you don't want to publish any single email address.
-If your table is large enough and contains multiple different emails and no email has a very high frequency than all others, it will anonymize all data. But if you have a small number of different values in a column, it can reproduce some of them.
-You should look at the working algorithm of this tool works, and fine-tune its command line parameters.
-
-This tool works fine only with an average amount of data (at least 1000s of rows).
+# ClickHouse obfuscator
+
+A simple tool for table data obfuscation.
+
+It reads an input table and produces an output table that retains some properties of the input but contains different data.
+It allows publishing almost real production data for usage in benchmarks.
+
+It is designed to retain the following properties of data:
+- cardinalities of values (number of distinct values) for every column and every tuple of columns;
+- conditional cardinalities: number of distinct values of one column under the condition on the value of another column;
+- probability distributions of the absolute value of integers; the sign of signed integers; exponent and sign for floats;
+- probability distributions of the length of strings;
+- probability of zero values of numbers; empty strings and arrays, `NULL`s;
+
+- data compression ratio when compressed with LZ77 and entropy family of codecs;
+- continuity (magnitude of difference) of time values across the table; continuity of floating-point values;
+- date component of `DateTime` values;
+
+- UTF-8 validity of string values;
+- string values look natural.
+
+Most of the properties above are viable for performance testing:
+
+reading data, filtering, aggregation, and sorting will work at almost the same speed
+as on original data due to saved cardinalities, magnitudes, compression ratios, etc.
+
+It works in a deterministic fashion: you define a seed value and the transformation is determined by input data and by seed.
+Some transformations are one to one and could be reversed, so you need to have a large seed and keep it secret.
+
+It uses some cryptographic primitives to transform data but from the cryptographic point of view, it does not do it properly, which is why you should not consider the result secure unless you have another reason. The result may retain some data you don't want to publish.
+
+
+It always leaves 0, 1, -1 numbers, dates, lengths of arrays, and null flags exactly as in source data.
+For example, you have a column `IsMobile` in your table with values 0 and 1. In transformed data, it will have the same value.
+
+So, the user will be able to count the exact ratio of mobile traffic.
+
+Let's give another example. Suppose you have some private data in your table, like user emails, and you don't want to publish any single email address.
+If your table is large enough and contains multiple different emails and no email has a much higher frequency than all others, it will anonymize all data. But if you have a small number of different values in a column, it can reproduce some of them.
+You should look at how this tool works and fine-tune its command line parameters.
+
+This tool works fine only with an average amount of data (at least 1000s of rows).
diff --git a/docs/en/operations/utilities/index.md b/docs/en/operations/utilities/index.md
index fe5048f7044..4adbb299b1d 100644
--- a/docs/en/operations/utilities/index.md
+++ b/docs/en/operations/utilities/index.md
@@ -9,5 +9,8 @@ toc_title: Overview

- [clickhouse-local](../../operations/utilities/clickhouse-local.md) — Allows running SQL queries on data without stopping the ClickHouse server, similar to how `awk` does this.
- [clickhouse-copier](../../operations/utilities/clickhouse-copier.md) — Copies (and reshards) data from one cluster to another cluster.
- [clickhouse-benchmark](../../operations/utilities/clickhouse-benchmark.md) — Loads server with the custom queries and settings.
+- [clickhouse-format](../../operations/utilities/clickhouse-format.md) — Enables formatting input queries.
+- [ClickHouse obfuscator](../../operations/utilities/clickhouse-obfuscator.md) — Obfuscates data.
+- [ClickHouse compressor](../../operations/utilities/clickhouse-compressor.md) — Compresses and decompresses data.
+- [clickhouse-odbc-bridge](../../operations/utilities/odbc-bridge.md) — A proxy server for the ODBC driver.

-[Original article](https://clickhouse.tech/docs/en/operations/utils/)
diff --git a/docs/en/sql-reference/aggregate-functions/combinators.md b/docs/en/sql-reference/aggregate-functions/combinators.md
index cddef68d49c..3fc5121ebcc 100644
--- a/docs/en/sql-reference/aggregate-functions/combinators.md
+++ b/docs/en/sql-reference/aggregate-functions/combinators.md
@@ -27,11 +27,41 @@ Example 2: `uniqArray(arr)` – Counts the number of unique elements in all ‘a

## -SimpleState {#agg-functions-combinator-simplestate}

-If you apply this combinator, the aggregate function returns the same value but with a different type. This is an `SimpleAggregateFunction(...)` that can be stored in a table to work with [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md) table engines.
+If you apply this combinator, the aggregate function returns the same value but with a different type. This is a [SimpleAggregateFunction(...)](../../sql-reference/data-types/simpleaggregatefunction.md) that can be stored in a table to work with [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md) tables.
+
+**Syntax**
+
+``` sql
+SimpleState(x)
+```
+
+**Arguments**
+
+- `x` — Aggregate function parameters.
+
+**Returned value**
+
+The value of an aggregate function with the `SimpleAggregateFunction(...)` type.
+ +**Example** + +Query: + +``` sql +WITH anySimpleState(number) AS c SELECT toTypeName(c), c FROM numbers(1); +``` + +Result: + +``` text +┌─toTypeName(c)────────────────────────┬─c─┐ +│ SimpleAggregateFunction(any, UInt64) │ 0 │ +└──────────────────────────────────────┴───┘ +``` ## -State {#agg-functions-combinator-state} -If you apply this combinator, the aggregate function doesn’t return the resulting value (such as the number of unique values for the [uniq](../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) function), but an intermediate state of the aggregation (for `uniq`, this is the hash table for calculating the number of unique values). This is an `AggregateFunction(...)` that can be used for further processing or stored in a table to finish aggregating later. +If you apply this combinator, the aggregate function does not return the resulting value (such as the number of unique values for the [uniq](../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) function), but an intermediate state of the aggregation (for `uniq`, this is the hash table for calculating the number of unique values). This is an `AggregateFunction(...)` that can be used for further processing or stored in a table to finish aggregating later. To work with these states, use: @@ -47,7 +77,7 @@ If you apply this combinator, the aggregate function takes the intermediate aggr ## -MergeState {#aggregate_functions_combinators-mergestate} -Merges the intermediate aggregation states in the same way as the -Merge combinator. However, it doesn’t return the resulting value, but an intermediate aggregation state, similar to the -State combinator. +Merges the intermediate aggregation states in the same way as the -Merge combinator. However, it does not return the resulting value, but an intermediate aggregation state, similar to the -State combinator. ## -ForEach {#agg-functions-combinator-foreach} @@ -62,7 +92,7 @@ Examples: `sum(DISTINCT x)`, `groupArray(DISTINCT x)`, `corrStableDistinct(DISTI Changes behavior of an aggregate function. -If an aggregate function doesn’t have input values, with this combinator it returns the default value for its return data type. Applies to the aggregate functions that can take empty input data. +If an aggregate function does not have input values, with this combinator it returns the default value for its return data type. Applies to the aggregate functions that can take empty input data. `-OrDefault` can be used with other combinators. @@ -192,7 +222,7 @@ Lets you divide data into groups, and then separately aggregates the data in tho **Arguments** - `start` — Starting value of the whole required interval for `resampling_key` values. -- `stop` — Ending value of the whole required interval for `resampling_key` values. The whole interval doesn’t include the `stop` value `[start, stop)`. +- `stop` — Ending value of the whole required interval for `resampling_key` values. The whole interval does not include the `stop` value `[start, stop)`. - `step` — Step for separating the whole interval into subintervals. The `aggFunction` is executed over each of those subintervals independently. - `resampling_key` — Column whose values are used for separating data into intervals. - `aggFunction_params` — `aggFunction` parameters. 
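Stepping back to the `-State` and `-Merge` combinators described earlier in this file, here is a minimal sketch of how they pair up in practice (the table and column names are hypothetical, chosen only for illustration):

``` sql
-- Store intermediate uniq states in an AggregatingMergeTree table.
CREATE TABLE daily_uniques
(
    day Date,
    users AggregateFunction(uniq, UInt64)
)
ENGINE = AggregatingMergeTree
ORDER BY day;

-- -State writes the intermediate aggregation state instead of a final value.
INSERT INTO daily_uniques
SELECT toDate('2021-05-01'), uniqState(number)
FROM numbers(1000);

-- -Merge combines stored states and finishes the aggregation.
SELECT day, uniqMerge(users) FROM daily_uniques GROUP BY day;
```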
@@ -249,4 +279,3 @@ FROM people
 └────────┴───────────────────────────┘
```

-
diff --git a/docs/en/sql-reference/aggregate-functions/parametric-functions.md b/docs/en/sql-reference/aggregate-functions/parametric-functions.md
index 27f76ebc5b6..e82cb4882a0 100644
--- a/docs/en/sql-reference/aggregate-functions/parametric-functions.md
+++ b/docs/en/sql-reference/aggregate-functions/parametric-functions.md
@@ -9,7 +9,7 @@ Some aggregate functions can accept not only argument columns (used for compress

## histogram {#histogram}

-Calculates an adaptive histogram. It doesn’t guarantee precise results.
+Calculates an adaptive histogram. It does not guarantee precise results.

``` sql
histogram(number_of_bins)(values)
```

@@ -79,7 +79,7 @@ FROM
 └────────┴───────┘
```

-In this case, you should remember that you don’t know the histogram bin borders.
+In this case, you should remember that you do not know the histogram bin borders.

## sequenceMatch(pattern)(timestamp, cond1, cond2, …) {#function-sequencematch}

@@ -114,7 +114,7 @@ Type: `UInt8`.

- `(?N)` — Matches the condition argument at position `N`. Conditions are numbered in the `[1, 32]` range. For example, `(?1)` matches the argument passed to the `cond1` parameter.

-- `.*` — Matches any number of events. You don’t need conditional arguments to match this element of the pattern.
+- `.*` — Matches any number of events. You do not need conditional arguments to match this element of the pattern.

- `(?t operator value)` — Sets the time in seconds that should separate two events. For example, pattern `(?1)(?t>1800)(?2)` matches events that occur more than 1800 seconds from each other. An arbitrary number of any events can lay between these events. You can use the `>=`, `>`, `<`, `<=` operators.

@@ -172,7 +172,7 @@ SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 4) FROM

## sequenceCount(pattern)(time, cond1, cond2, …) {#function-sequencecount}

-Counts the number of event chains that matched the pattern. The function searches event chains that don’t overlap. It starts to search for the next chain after the current chain is matched.
+Counts the number of event chains that matched the pattern. The function searches event chains that do not overlap. It starts to search for the next chain after the current chain is matched.

!!! warning "Warning"
    Events that occur at the same second may lay in the sequence in an undefined order affecting the result.

@@ -253,7 +253,7 @@ windowFunnel(window, [mode, [mode, ... ]])(timestamp, cond1, cond2, ..., condN)

**Parameters**

-- `window` — Length of the sliding window. The unit of `window` depends on the `timestamp` itself and varies. Determined using the expression `timestamp of cond2 <= timestamp of cond1 + window`.
+- `window` — Length of the sliding window; it is the time interval between the first and the last condition. The unit of `window` depends on the `timestamp` itself and varies. Determined using the expression `timestamp of cond1 <= timestamp of cond2 <= ... <= timestamp of condN <= timestamp of cond1 + window`.
- `mode` — It is an optional argument. One or more modes can be set.
    - `'strict'` — If same condition holds for sequence of events then such non-unique events would be skipped.
    - `'strict_order'` — Don't allow interventions of other events. E.g. in the case of `A->B->D->C`, it stops finding `A->B->C` at the `D` and the max event level is 2.
@@ -312,7 +312,7 @@ FROM GROUP BY user_id ) GROUP BY level -ORDER BY level ASC +ORDER BY level ASC; ``` Result: diff --git a/docs/en/sql-reference/aggregate-functions/reference/argmax.md b/docs/en/sql-reference/aggregate-functions/reference/argmax.md index 72aa607a751..0630e2f585e 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/argmax.md +++ b/docs/en/sql-reference/aggregate-functions/reference/argmax.md @@ -6,20 +6,12 @@ toc_priority: 106 Calculates the `arg` value for a maximum `val` value. If there are several different values of `arg` for maximum values of `val`, returns the first of these values encountered. -Tuple version of this function will return the tuple with the maximum `val` value. It is convenient for use with [SimpleAggregateFunction](../../../sql-reference/data-types/simpleaggregatefunction.md). - **Syntax** ``` sql argMax(arg, val) ``` -or - -``` sql -argMax(tuple(arg, val)) -``` - **Arguments** - `arg` — Argument. @@ -29,13 +21,7 @@ argMax(tuple(arg, val)) - `arg` value that corresponds to maximum `val` value. -Type: matches `arg` type. - -For tuple in the input: - -- Tuple `(arg, val)`, where `val` is the maximum value and `arg` is a corresponding value. - -Type: [Tuple](../../../sql-reference/data-types/tuple.md). +Type: matches `arg` type. **Example** @@ -52,15 +38,13 @@ Input table: Query: ``` sql -SELECT argMax(user, salary), argMax(tuple(user, salary), salary), argMax(tuple(user, salary)) FROM salary; +SELECT argMax(user, salary) FROM salary; ``` Result: ``` text -┌─argMax(user, salary)─┬─argMax(tuple(user, salary), salary)─┬─argMax(tuple(user, salary))─┐ -│ director │ ('director',5000) │ ('director',5000) │ -└──────────────────────┴─────────────────────────────────────┴─────────────────────────────┘ +┌─argMax(user, salary)─┐ +│ director │ +└──────────────────────┘ ``` - -[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/argmax/) diff --git a/docs/en/sql-reference/aggregate-functions/reference/argmin.md b/docs/en/sql-reference/aggregate-functions/reference/argmin.md index 7ddc38cd28a..a259a76b7d7 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/argmin.md +++ b/docs/en/sql-reference/aggregate-functions/reference/argmin.md @@ -6,20 +6,12 @@ toc_priority: 105 Calculates the `arg` value for a minimum `val` value. If there are several different values of `arg` for minimum values of `val`, returns the first of these values encountered. -Tuple version of this function will return the tuple with the minimum `val` value. It is convenient for use with [SimpleAggregateFunction](../../../sql-reference/data-types/simpleaggregatefunction.md). - **Syntax** ``` sql argMin(arg, val) ``` -or - -``` sql -argMin(tuple(arg, val)) -``` - **Arguments** - `arg` — Argument. @@ -29,13 +21,7 @@ argMin(tuple(arg, val)) - `arg` value that corresponds to minimum `val` value. -Type: matches `arg` type. - -For tuple in the input: - -- Tuple `(arg, val)`, where `val` is the minimum value and `arg` is a corresponding value. - -Type: [Tuple](../../../sql-reference/data-types/tuple.md). +Type: matches `arg` type. 
**Example**

@@ -52,15 +38,13 @@ Input table:

Query:

``` sql
-SELECT argMin(user, salary), argMin(tuple(user, salary)) FROM salary;
+SELECT argMin(user, salary) FROM salary
```

Result:

``` text
-┌─argMin(user, salary)─┬─argMin(tuple(user, salary))─┐
-│ worker │ ('worker',1000) │
-└──────────────────────┴─────────────────────────────┘
+┌─argMin(user, salary)─┐
+│ worker │
+└──────────────────────┘
```
-
-[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/argmin/)
diff --git a/docs/en/sql-reference/aggregate-functions/reference/deltasum.md b/docs/en/sql-reference/aggregate-functions/reference/deltasum.md
index e0c74576bb6..2945084db77 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/deltasum.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/deltasum.md
@@ -6,6 +6,9 @@ toc_priority: 141

 Sums the arithmetic difference between consecutive rows. If the difference is negative, it is ignored.

+!!! info "Note"
+    The underlying data must be sorted for this function to work properly. If you would like to use this function in a [materialized view](../../../sql-reference/statements/create/view.md#materialized), you most likely want to use the [deltaSumTimestamp](../../../sql-reference/aggregate-functions/reference/deltasumtimestamp.md#agg_functions-deltasumtimestamp) method instead.
+
**Syntax**

``` sql
diff --git a/docs/en/sql-reference/aggregate-functions/reference/deltasumtimestamp.md b/docs/en/sql-reference/aggregate-functions/reference/deltasumtimestamp.md
new file mode 100644
index 00000000000..241010c4761
--- /dev/null
+++ b/docs/en/sql-reference/aggregate-functions/reference/deltasumtimestamp.md
@@ -0,0 +1,45 @@
+---
+toc_priority: 141
+---
+
+# deltaSumTimestamp {#agg_functions-deltasumtimestamp}
+
+Adds the difference between consecutive rows. If the difference is negative, it is ignored.
+
+This function is primarily for [materialized views](../../../sql-reference/statements/create/view.md#materialized) that are ordered by some time bucket-aligned timestamp, for example, a `toStartOfMinute` bucket. Because the rows in such a materialized view will all have the same timestamp, it is impossible for them to be merged in the "right" order. This function keeps track of the `timestamp` of the values it's seen, so it's possible to order the states correctly during merging.
+
+To calculate the delta sum across an ordered collection you can simply use the [deltaSum](../../../sql-reference/aggregate-functions/reference/deltasum.md#agg_functions-deltasum) function.
+
+**Syntax**
+
+``` sql
+deltaSumTimestamp(value, timestamp)
+```
+
+**Arguments**
+
+- `value` — Input values, must be some [Integer](../../data-types/int-uint.md) type or [Float](../../data-types/float.md) type or a [Date](../../data-types/date.md) or [DateTime](../../data-types/datetime.md).
+- `timestamp` — The parameter used to order values, must be some [Integer](../../data-types/int-uint.md) type or [Float](../../data-types/float.md) type or a [Date](../../data-types/date.md) or [DateTime](../../data-types/datetime.md).
+
+**Returned value**
+
+- Accumulated differences between consecutive values, ordered by the `timestamp` parameter.
+
+Type: [Integer](../../data-types/int-uint.md) or [Float](../../data-types/float.md) or [Date](../../data-types/date.md) or [DateTime](../../data-types/datetime.md).
+
+**Example**
+
+Query:
+
+```sql
+SELECT deltaSumTimestamp(value, timestamp)
+FROM (SELECT number AS timestamp, [0, 4, 8, 3, 0, 0, 0, 1, 3, 5][number] AS value FROM numbers(1, 10));
+```
+
+Result:
+
+``` text
+┌─deltaSumTimestamp(value, timestamp)─┐
+│ 13 │
+└─────────────────────────────────────┘
+```
diff --git a/docs/en/sql-reference/aggregate-functions/reference/max.md b/docs/en/sql-reference/aggregate-functions/reference/max.md
index c462dd590a6..25173a48906 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/max.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/max.md
@@ -4,4 +4,21 @@ toc_priority: 3

# max {#agg_function-max}

-Calculates the maximum.
+Aggregate function that calculates the maximum across a group of values.
+
+Example:
+
+```
+SELECT max(salary) FROM employees;
+```
+
+```
+SELECT department, max(salary) FROM employees GROUP BY department;
+```
+
+If you need a non-aggregate function to choose the maximum of two values, see `greatest`:
+
+```
+SELECT greatest(a, b) FROM table;
+```
+
diff --git a/docs/en/sql-reference/aggregate-functions/reference/min.md b/docs/en/sql-reference/aggregate-functions/reference/min.md
index 56b03468243..64b155857f8 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/min.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/min.md
@@ -4,4 +4,20 @@ toc_priority: 2

## min {#agg_function-min}

-Calculates the minimum.
+Aggregate function that calculates the minimum across a group of values.
+
+Example:
+
+```
+SELECT min(salary) FROM employees;
+```
+
+```
+SELECT department, min(salary) FROM employees GROUP BY department;
+```
+
+If you need a non-aggregate function to choose the minimum of two values, see `least`:
+
+```
+SELECT least(a, b) FROM table;
+```
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest.md b/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest.md
index dcc665a68af..dd0d59978d1 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest.md
@@ -6,7 +6,7 @@ toc_priority: 207

 Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence using the [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) algorithm.

-The maximum error is 1%. Memory consumption is `log(n)`, where `n` is a number of values. The result depends on the order of running the query, and is nondeterministic.
+Memory consumption is `log(n)`, where `n` is a number of values. The result depends on the order of running the query, and is nondeterministic.

 The performance of the function is lower than performance of [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md#quantile) or [quantileTiming](../../../sql-reference/aggregate-functions/reference/quantiletiming.md#quantiletiming). In terms of the ratio of State size to precision, this function is much better than `quantile`.
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md b/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md
index 56ef598f7e7..70f30f3a480 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md
@@ -12,13 +12,16 @@ The result depends on the order of running the query, and is nondeterministic.
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.

+!!! note "Note"
+    Using `quantileTDigestWeighted` [is not recommended for tiny data sets](https://github.com/tdunning/t-digest/issues/167#issuecomment-828650275) and can lead to significant error. In this case, consider using [`quantileTDigest`](../../../sql-reference/aggregate-functions/reference/quantiletdigest.md) instead.
+
**Syntax**

``` sql
-quantileTDigest(level)(expr)
+quantileTDigestWeighted(level)(expr, weight)
```

-Alias: `medianTDigest`.
+Alias: `medianTDigestWeighted`.

**Arguments**
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiletiming.md b/docs/en/sql-reference/aggregate-functions/reference/quantiletiming.md
index 58ce6495a96..dd545c1a485 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantiletiming.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantiletiming.md
@@ -6,7 +6,7 @@ toc_priority: 204

 With the determined precision computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.

-The result is deterministic (it doesn’t depend on the query processing order). The function is optimized for working with sequences which describe distributions like loading web pages times or backend response times.
+The result is deterministic (it does not depend on the query processing order). The function is optimized for working with sequences which describe distributions like loading web pages times or backend response times.

 When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.

@@ -31,7 +31,7 @@ Alias: `medianTiming`.

 The calculation is accurate if:

-- Total number of values doesn’t exceed 5670.
+- Total number of values does not exceed 5670.
- Total number of values exceeds 5670, but the page loading time is less than 1024ms.

 Otherwise, the result of the calculation is rounded to the nearest multiple of 16 ms.
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiletimingweighted.md b/docs/en/sql-reference/aggregate-functions/reference/quantiletimingweighted.md
index fb3b9dbf4d2..25846cde636 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantiletimingweighted.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantiletimingweighted.md
@@ -6,7 +6,7 @@ toc_priority: 205

 With the determined precision computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence according to the weight of each sequence member.

-The result is deterministic (it doesn’t depend on the query processing order). The function is optimized for working with sequences which describe distributions like loading web pages times or backend response times.
+The result is deterministic (it does not depend on the query processing order). The function is optimized for working with sequences which describe distributions like loading web pages times or backend response times.
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.

@@ -33,7 +33,7 @@ Alias: `medianTimingWeighted`.

 The calculation is accurate if:

-- Total number of values doesn’t exceed 5670.
+- Total number of values does not exceed 5670.
- Total number of values exceeds 5670, but the page loading time is less than 1024ms.

 Otherwise, the result of the calculation is rounded to the nearest multiple of 16 ms.
diff --git a/docs/en/sql-reference/aggregate-functions/reference/rankCorr.md b/docs/en/sql-reference/aggregate-functions/reference/rankCorr.md
index 55ee1b8289b..b364317c22b 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/rankCorr.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/rankCorr.md
@@ -1,4 +1,8 @@
-## rankCorr {#agg_function-rankcorr}
+---
+toc_priority: 145
+---
+
+# rankCorr {#agg_function-rankcorr}

 Computes a rank correlation coefficient.
diff --git a/docs/en/sql-reference/aggregate-functions/reference/sumcount.md b/docs/en/sql-reference/aggregate-functions/reference/sumcount.md
new file mode 100644
index 00000000000..80e87663f89
--- /dev/null
+++ b/docs/en/sql-reference/aggregate-functions/reference/sumcount.md
@@ -0,0 +1,46 @@
+---
+toc_priority: 144
+---
+
+# sumCount {#agg_function-sumCount}
+
+Calculates the sum of the numbers and counts the number of rows at the same time.
+
+**Syntax**
+
+``` sql
+sumCount(x)
+```
+
+**Arguments**
+
+- `x` — Input value, must be [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md), or [Decimal](../../../sql-reference/data-types/decimal.md).
+
+**Returned value**
+
+- Tuple `(sum, count)`, where `sum` is the sum of numbers and `count` is the number of rows with non-NULL values.
+
+Type: [Tuple](../../../sql-reference/data-types/tuple.md).
+
+**Example**
+
+Query:
+
+``` sql
+CREATE TABLE s_table (x Int8) Engine = Log;
+INSERT INTO s_table SELECT number FROM numbers(0, 20);
+INSERT INTO s_table VALUES (NULL);
+SELECT sumCount(x) from s_table;
+```
+
+Result:
+
+``` text
+┌─sumCount(x)─┐
+│ (190,20) │
+└─────────────┘
+```
+
+**See also**
+
+- [optimize_fuse_sum_count_avg](../../../operations/settings/settings.md#optimize_fuse_sum_count_avg) setting.
diff --git a/docs/en/sql-reference/aggregate-functions/reference/topk.md b/docs/en/sql-reference/aggregate-functions/reference/topk.md
index b9bea013ea8..7e6d0db4946 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/topk.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/topk.md
@@ -12,7 +12,7 @@ Implements the [Filtered Space-Saving](http://www.l2f.inesc-id.pt/~fmmb/wiki/upl

topK(N)(column)
```

-This function doesn’t provide a guaranteed result. In certain situations, errors might occur and it might return frequent values that aren’t the most frequent values.
+This function does not provide a guaranteed result. In certain situations, errors might occur and it might return frequent values that aren’t the most frequent values.

 We recommend using the `N < 10` value; performance is reduced with large `N` values. Maximum value of `N = 65536`.
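For a quick feel of the output shape of `topK`, a minimal illustrative query over synthetic data (values chosen arbitrarily; the result is an array of up to `N` of the most frequent values, e.g. `[0,1,2]` here):

``` sql
SELECT topK(3)(number % 5) AS top_values
FROM numbers(100);
```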
diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniq.md b/docs/en/sql-reference/aggregate-functions/reference/uniq.md index 7ba2cdc6cb8..598af24c0de 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/uniq.md +++ b/docs/en/sql-reference/aggregate-functions/reference/uniq.md @@ -28,7 +28,7 @@ Function: This algorithm is very accurate and very efficient on the CPU. When the query contains several of these functions, using `uniq` is almost as fast as using other aggregate functions. -- Provides the result deterministically (it doesn’t depend on the query processing order). +- Provides the result deterministically (it does not depend on the query processing order). We recommend using this function in almost all scenarios. @@ -38,3 +38,4 @@ We recommend using this function in almost all scenarios. - [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64) - [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12) - [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) +- [uniqTheta](../../../sql-reference/aggregate-functions/reference/uniqthetasketch.md#agg_function-uniqthetasketch) diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqcombined.md b/docs/en/sql-reference/aggregate-functions/reference/uniqcombined.md index 4434686ae61..623c43ae10c 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/uniqcombined.md +++ b/docs/en/sql-reference/aggregate-functions/reference/uniqcombined.md @@ -32,7 +32,7 @@ Function: For a small number of distinct elements, an array is used. When the set size is larger, a hash table is used. For a larger number of elements, HyperLogLog is used, which will occupy a fixed amount of memory. -- Provides the result deterministically (it doesn’t depend on the query processing order). +- Provides the result deterministically (it does not depend on the query processing order). !!! note "Note" Since it uses 32-bit hash for non-`String` type, the result will have very high error for cardinalities significantly larger than `UINT_MAX` (error will raise quickly after a few tens of billions of distinct values), hence in this case you should use [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64) @@ -49,3 +49,4 @@ Compared to the [uniq](../../../sql-reference/aggregate-functions/reference/uniq - [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64) - [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12) - [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) +- [uniqTheta](../../../sql-reference/aggregate-functions/reference/uniqthetasketch.md#agg_function-uniqthetasketch) diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqexact.md b/docs/en/sql-reference/aggregate-functions/reference/uniqexact.md index eee675016ee..e446258fbf7 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/uniqexact.md +++ b/docs/en/sql-reference/aggregate-functions/reference/uniqexact.md @@ -23,3 +23,4 @@ The function takes a variable number of parameters. 
Parameters can be `Tuple`, `Array`, `Date`, `DateTime`, `String`, or numeric types.

- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq)
- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniqcombined)
- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniqhll12)
+- [uniqTheta](../../../sql-reference/aggregate-functions/reference/uniqthetasketch.md#agg_function-uniqthetasketch)

diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqhll12.md b/docs/en/sql-reference/aggregate-functions/reference/uniqhll12.md
index 5b23ea81eae..1d619ab7d93 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/uniqhll12.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/uniqhll12.md
@@ -26,14 +26,15 @@ Function:

- Uses the HyperLogLog algorithm to approximate the number of different argument values.

-    212 5-bit cells are used. The size of the state is slightly more than 2.5 KB. The result is not very accurate (up to ~10% error) for small data sets (<10K elements). However, the result is fairly accurate for high-cardinality data sets (10K-100M), with a maximum error of ~1.6%. Starting from 100M, the estimation error increases, and the function will return very inaccurate results for data sets with extremely high cardinality (1B+ elements).
+    2^12 5-bit cells are used. The size of the state is slightly more than 2.5 KB. The result is not very accurate (up to ~10% error) for small data sets (<10K elements). However, the result is fairly accurate for high-cardinality data sets (10K-100M), with a maximum error of ~1.6%. Starting from 100M, the estimation error increases, and the function will return very inaccurate results for data sets with extremely high cardinality (1B+ elements).

-- Provides the determinate result (it doesn’t depend on the query processing order).
+- Provides a deterministic result (it does not depend on the query processing order).

-We don’t recommend using this function. In most cases, use the [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) or [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined) function.
+We do not recommend using this function. In most cases, use the [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) or [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined) function.

**See Also**

- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq)
- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined)
- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)
+- [uniqTheta](../../../sql-reference/aggregate-functions/reference/uniqthetasketch.md#agg_function-uniqthetasketch)

diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqthetasketch.md b/docs/en/sql-reference/aggregate-functions/reference/uniqthetasketch.md
new file mode 100644
index 00000000000..b5161462442
--- /dev/null
+++ b/docs/en/sql-reference/aggregate-functions/reference/uniqthetasketch.md
@@ -0,0 +1,39 @@
+---
+toc_priority: 195
+---
+
+# uniqTheta {#agg_function-uniqthetasketch}
+
+Calculates the approximate number of different argument values, using the [Theta Sketch Framework](https://datasketches.apache.org/docs/Theta/ThetaSketchFramework.html).
+
+``` sql
+uniqTheta(x[, ...])
+```
+
+**Arguments**
+
+The function takes a variable number of parameters. Parameters can be `Tuple`, `Array`, `Date`, `DateTime`, `String`, or numeric types.
+
+**Returned value**
+
+- A [UInt64](../../../sql-reference/data-types/int-uint.md)-type number.
+
+**Implementation details**
+
+Function:
+
+- Calculates a hash for all parameters in the aggregate, then uses it in calculations.
+
+- Uses the [KMV](https://datasketches.apache.org/docs/Theta/InverseEstimate.html) algorithm to approximate the number of different argument values.
+
+    4096 (2^12) 64-bit sketches are used. The size of the state is about 41 KB.
+
+- The relative error is 3.125% (95% confidence), see the [relative error table](https://datasketches.apache.org/docs/Theta/ThetaErrorTable.html) for details.
+
+**See Also**
+
+- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq)
+- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined)
+- [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64)
+- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12)
+- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)

diff --git a/docs/en/sql-reference/data-types/date.md b/docs/en/sql-reference/data-types/date.md
index 886e93f433c..0cfac4d59fe 100644
--- a/docs/en/sql-reference/data-types/date.md
+++ b/docs/en/sql-reference/data-types/date.md
@@ -5,7 +5,7 @@ toc_title: Date

# Date {#data_type-date}

-A date. Stored in two bytes as the number of days since 1970-01-01 (unsigned). Allows storing values from just after the beginning of the Unix Epoch to the upper threshold defined by a constant at the compilation stage (currently, this is until the year 2106, but the final fully-supported year is 2105).
+A date. Stored in two bytes as the number of days since 1970-01-01 (unsigned). Allows storing values from just after the beginning of the Unix Epoch to the upper threshold defined by a constant at the compilation stage (currently, this is until the year 2149, but the final fully-supported year is 2148).

The date value is stored without the time zone.

diff --git a/docs/en/sql-reference/data-types/datetime.md b/docs/en/sql-reference/data-types/datetime.md
index d95abe57510..ed07f599b91 100644
--- a/docs/en/sql-reference/data-types/datetime.md
+++ b/docs/en/sql-reference/data-types/datetime.md
@@ -23,7 +23,7 @@ The point in time is saved as a [Unix timestamp](https://en.wikipedia.org/wiki/U

Timezone agnostic unix timestamp is stored in tables, and the timezone is used to transform it to text format or back during data import/export or to make calendar calculations on the values (example: `toDate`, `toHour` functions et cetera). The time zone is not stored in the rows of the table (or in resultset), but is stored in the column metadata.

-A list of supported time zones can be found in the [IANA Time Zone Database](https://www.iana.org/time-zones) and also can be queried by `SELECT * FROM system.time_zones`.
+A list of supported time zones can be found in the [IANA Time Zone Database](https://www.iana.org/time-zones) and also can be queried by `SELECT * FROM system.time_zones`. [The list](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) is also available at Wikipedia.

You can explicitly set a time zone for `DateTime`-type columns when creating a table.
Example: `DateTime('UTC')`. If the time zone isn’t set, ClickHouse uses the value of the [timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) parameter in the server settings or the operating system settings at the moment of the ClickHouse server start.

diff --git a/docs/en/sql-reference/data-types/datetime64.md b/docs/en/sql-reference/data-types/datetime64.md
index 5cba8315090..1d3725b9fb3 100644
--- a/docs/en/sql-reference/data-types/datetime64.md
+++ b/docs/en/sql-reference/data-types/datetime64.md
@@ -9,7 +9,7 @@ Allows to store an instant in time, that can be expressed as a calendar date and

Tick size (precision): 10-precision seconds

-Syntax:
+**Syntax:**

``` sql
DateTime64(precision, [timezone])
@@ -17,9 +17,11 @@ DateTime64(precision, [timezone])

Internally, stores data as a number of ‘ticks’ since epoch start (1970-01-01 00:00:00 UTC) as Int64. The tick resolution is determined by the precision parameter. Additionally, the `DateTime64` type can store time zone that is the same for the entire column, that affects how the values of the `DateTime64` type values are displayed in text format and how the values specified as strings are parsed (‘2020-01-01 05:00:01.000’). The time zone is not stored in the rows of the table (or in resultset), but is stored in the column metadata. See details in [DateTime](../../sql-reference/data-types/datetime.md).

+The supported range is from January 1, 1925 to December 31, 2283.
+
## Examples {#examples}

-**1.** Creating a table with `DateTime64`-type column and inserting data into it:
+1. Creating a table with `DateTime64`-type column and inserting data into it:

``` sql
CREATE TABLE dt
@@ -27,15 +29,15 @@ CREATE TABLE dt
    `timestamp` DateTime64(3, 'Europe/Moscow'),
    `event_id` UInt8
)
-ENGINE = TinyLog
+ENGINE = TinyLog;
```

``` sql
-INSERT INTO dt Values (1546300800000, 1), ('2019-01-01 00:00:00', 2)
+INSERT INTO dt Values (1546300800000, 1), ('2019-01-01 00:00:00', 2);
```

``` sql
-SELECT * FROM dt
+SELECT * FROM dt;
```

``` text
@@ -45,13 +47,13 @@ SELECT * FROM dt
└─────────────────────────┴──────────┘
```

-- When inserting datetime as an integer, it is treated as an appropriately scaled Unix Timestamp (UTC). `1546300800000` (with precision 3) represents `'2019-01-01 00:00:00'` UTC. However, as `timestamp` column has `Europe/Moscow` (UTC+3) timezone specified, when outputting as a string the value will be shown as `'2019-01-01 03:00:00'`
+- When inserting datetime as an integer, it is treated as an appropriately scaled Unix Timestamp (UTC). `1546300800000` (with precision 3) represents `'2019-01-01 00:00:00'` UTC. However, as `timestamp` column has `Europe/Moscow` (UTC+3) timezone specified, when outputting as a string the value will be shown as `'2019-01-01 03:00:00'`.
- When inserting string value as datetime, it is treated as being in column timezone. `'2019-01-01 00:00:00'` will be treated as being in `Europe/Moscow` timezone and stored as `1546290000000`.

-**2.** Filtering on `DateTime64` values
+2.
Filtering on `DateTime64` values ``` sql -SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europe/Moscow') +SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europe/Moscow'); ``` ``` text @@ -60,12 +62,12 @@ SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europ └─────────────────────────┴──────────┘ ``` -Unlike `DateTime`, `DateTime64` values are not converted from `String` automatically +Unlike `DateTime`, `DateTime64` values are not converted from `String` automatically. -**3.** Getting a time zone for a `DateTime64`-type value: +3. Getting a time zone for a `DateTime64`-type value: ``` sql -SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS x +SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS x; ``` ``` text @@ -74,13 +76,13 @@ SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS └─────────────────────────┴────────────────────────────────┘ ``` -**4.** Timezone conversion +4. Timezone conversion ``` sql SELECT toDateTime64(timestamp, 3, 'Europe/London') as lon_time, toDateTime64(timestamp, 3, 'Europe/Moscow') as mos_time -FROM dt +FROM dt; ``` ``` text @@ -90,7 +92,7 @@ FROM dt └─────────────────────────┴─────────────────────────┘ ``` -## See Also {#see-also} +**See Also** - [Type conversion functions](../../sql-reference/functions/type-conversion-functions.md) - [Functions for working with dates and times](../../sql-reference/functions/date-time-functions.md) diff --git a/docs/en/sql-reference/data-types/decimal.md b/docs/en/sql-reference/data-types/decimal.md index b268f747165..af2655cd0c2 100644 --- a/docs/en/sql-reference/data-types/decimal.md +++ b/docs/en/sql-reference/data-types/decimal.md @@ -31,7 +31,7 @@ For example, Decimal32(4) can contain numbers from -99999.9999 to 99999.9999 wit Internally data is represented as normal signed integers with respective bit width. Real value ranges that can be stored in memory are a bit larger than specified above, which are checked only on conversion from a string. -Because modern CPU’s do not support 128-bit integers natively, operations on Decimal128 are emulated. Because of this Decimal128 works significantly slower than Decimal32/Decimal64. +Because modern CPUs do not support 128-bit integers natively, operations on Decimal128 are emulated. Because of this Decimal128 works significantly slower than Decimal32/Decimal64. ## Operations and Result Type {#operations-and-result-type} diff --git a/docs/en/sql-reference/data-types/geo.md b/docs/en/sql-reference/data-types/geo.md index 9ed328e0de6..50093053686 100644 --- a/docs/en/sql-reference/data-types/geo.md +++ b/docs/en/sql-reference/data-types/geo.md @@ -5,7 +5,7 @@ toc_title: Geo # Geo Data Types {#geo-data-types} -Clickhouse supports data types for representing geographical objects — locations, lands, etc. +ClickHouse supports data types for representing geographical objects — locations, lands, etc. !!! warning "Warning" Currently geo data types are an experimental feature. To work with them you must set `allow_experimental_geo_types = 1`. 
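As a quick illustration, a minimal sketch of enabling the setting and storing a `Point` value (the table and column names here are illustrative only):

``` sql
SET allow_experimental_geo_types = 1;
-- A Point is represented as a Tuple(Float64, Float64).
CREATE TABLE geo_point (p Point) ENGINE = Memory();
INSERT INTO geo_point VALUES ((10, 10));
SELECT p, toTypeName(p) FROM geo_point;
```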
diff --git a/docs/en/sql-reference/data-types/lowcardinality.md b/docs/en/sql-reference/data-types/lowcardinality.md index e0a483973e6..5f0f400ce43 100644 --- a/docs/en/sql-reference/data-types/lowcardinality.md +++ b/docs/en/sql-reference/data-types/lowcardinality.md @@ -55,7 +55,7 @@ Functions: ## See Also {#see-also} - [A Magical Mystery Tour of the LowCardinality Data Type](https://www.altinity.com/blog/2019/3/27/low-cardinality). -- [Reducing Clickhouse Storage Cost with the Low Cardinality Type – Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/). +- [Reducing ClickHouse Storage Cost with the Low Cardinality Type – Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/). - [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/yandex/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf). [Original article](https://clickhouse.tech/docs/en/sql-reference/data-types/lowcardinality/) diff --git a/docs/en/sql-reference/data-types/nullable.md b/docs/en/sql-reference/data-types/nullable.md index 2cf5e41867e..4207e389734 100644 --- a/docs/en/sql-reference/data-types/nullable.md +++ b/docs/en/sql-reference/data-types/nullable.md @@ -5,7 +5,7 @@ toc_title: Nullable # Nullable(typename) {#data_type-nullable} -Allows to store special marker ([NULL](../../sql-reference/syntax.md)) that denotes “missing value” alongside normal values allowed by `TypeName`. For example, a `Nullable(Int8)` type column can store `Int8` type values, and the rows that don’t have a value will store `NULL`. +Allows to store special marker ([NULL](../../sql-reference/syntax.md)) that denotes “missing value” alongside normal values allowed by `TypeName`. For example, a `Nullable(Int8)` type column can store `Int8` type values, and the rows that do not have a value will store `NULL`. For a `TypeName`, you can’t use composite data types [Array](../../sql-reference/data-types/array.md) and [Tuple](../../sql-reference/data-types/tuple.md). Composite data types can contain `Nullable` type values, such as `Array(Nullable(Int8))`. diff --git a/docs/en/sql-reference/data-types/simpleaggregatefunction.md b/docs/en/sql-reference/data-types/simpleaggregatefunction.md index 244779c5ca8..8138d4a4103 100644 --- a/docs/en/sql-reference/data-types/simpleaggregatefunction.md +++ b/docs/en/sql-reference/data-types/simpleaggregatefunction.md @@ -1,6 +1,8 @@ # SimpleAggregateFunction {#data-type-simpleaggregatefunction} -`SimpleAggregateFunction(name, types_of_arguments…)` data type stores current value of the aggregate function, and does not store its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does. This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. This property guarantees that partial aggregation results are enough to compute the combined one, so we don’t have to store and process any extra data. 
+`SimpleAggregateFunction(name, types_of_arguments…)` data type stores current value of the aggregate function, and does not store its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does. This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. This property guarantees that partial aggregation results are enough to compute the combined one, so we do not have to store and process any extra data. + +The common way to produce an aggregate function value is by calling the aggregate function with the [-SimpleState](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-simplestate) suffix. The following aggregate functions are supported: @@ -18,8 +20,6 @@ The following aggregate functions are supported: - [`sumMap`](../../sql-reference/aggregate-functions/reference/summap.md#agg_functions-summap) - [`minMap`](../../sql-reference/aggregate-functions/reference/minmap.md#agg_functions-minmap) - [`maxMap`](../../sql-reference/aggregate-functions/reference/maxmap.md#agg_functions-maxmap) -- [`argMin`](../../sql-reference/aggregate-functions/reference/argmin.md) -- [`argMax`](../../sql-reference/aggregate-functions/reference/argmax.md) !!! note "Note" diff --git a/docs/en/sql-reference/data-types/string.md b/docs/en/sql-reference/data-types/string.md index 42d8798c8a3..e72ce8f0b5a 100644 --- a/docs/en/sql-reference/data-types/string.md +++ b/docs/en/sql-reference/data-types/string.md @@ -12,7 +12,7 @@ When creating tables, numeric parameters for string fields can be set (e.g. `VAR ## Encodings {#encodings} -ClickHouse doesn’t have the concept of encodings. Strings can contain an arbitrary set of bytes, which are stored and output as-is. +ClickHouse does not have the concept of encodings. Strings can contain an arbitrary set of bytes, which are stored and output as-is. If you need to store texts, we recommend using UTF-8 encoding. At the very least, if your terminal uses UTF-8 (as recommended), you can read and write your values without making conversions. Similarly, certain functions for working with strings have separate variations that work under the assumption that the string contains a set of bytes representing a UTF-8 encoded text. For example, the ‘length’ function calculates the string length in bytes, while the ‘lengthUTF8’ function calculates the string length in Unicode code points, assuming that the value is UTF-8 encoded. diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md index de6a780235f..c69dc4224e6 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md @@ -7,9 +7,9 @@ toc_title: Storing Dictionaries in Memory There are a variety of ways to store dictionaries in memory. -We recommend [flat](#flat), [hashed](#dicts-external_dicts_dict_layout-hashed) and [complex_key_hashed](#complex-key-hashed). which provide optimal processing speed. 
+We recommend [flat](#flat), [hashed](#dicts-external_dicts_dict_layout-hashed) and [complex_key_hashed](#complex-key-hashed), which provide optimal processing speed.

-Caching is not recommended because of potentially poor performance and difficulties in selecting optimal parameters. Read more in the section “[cache](#cache)”.
+Caching is not recommended because of potentially poor performance and difficulties in selecting optimal parameters. Read more in the section [cache](#cache).

There are several ways to improve dictionary performance:

@@ -68,9 +68,9 @@ LAYOUT(LAYOUT_TYPE(param value)) -- layout settings

The dictionary is completely stored in memory in the form of flat arrays. How much memory does the dictionary use? The amount is proportional to the size of the largest key (in space used).

-The dictionary key has the `UInt64` type and the value is limited to 500,000. If a larger key is discovered when creating the dictionary, ClickHouse throws an exception and does not create the dictionary.
+The dictionary key has the [UInt64](../../../sql-reference/data-types/int-uint.md) type and the value is limited to `max_array_size` (by default — 500,000). If a larger key is discovered when creating the dictionary, ClickHouse throws an exception and does not create the dictionary. The initial size of the dictionary's flat arrays is controlled by the `initial_array_size` setting (by default — 1024).

-All types of sources are supported. When updating, data (from a file or from a table) is read in its entirety.
+All types of sources are supported. When updating, data (from a file or from a table) is read in its entirety.

This method provides the best performance among all available methods of storing the dictionary.

@@ -78,21 +78,27 @@ Configuration example:

``` xml
-<flat />
+<flat>
+  <initial_array_size>50000</initial_array_size>
+  <max_array_size>5000000</max_array_size>
+</flat>
```

or

``` sql
-LAYOUT(FLAT())
+LAYOUT(FLAT(INITIAL_ARRAY_SIZE 50000 MAX_ARRAY_SIZE 5000000))
```

### hashed {#dicts-external_dicts_dict_layout-hashed}

The dictionary is completely stored in memory in the form of a hash table. The dictionary can contain any number of elements with any identifiers. In practice, the number of keys can reach tens of millions of items.

-The hash table will be preallocated (this will make dictionary load faster), if the is approx number of total rows is known, this is supported only if the source is `clickhouse` without any `<where>` (since in case of `<where>` you can filter out too much rows and the dictionary will allocate too much memory, that will not be used eventually).
+If `preallocate` is `true` (default is `false`), the hash table will be preallocated (this will make the dictionary load faster). But note that you should use it only if:
+
+- The source supports an approximate number of elements (for now this is supported only by the `ClickHouse` source).
+- There are no duplicates in the data (otherwise it may increase memory usage for the hashtable).

All types of sources are supported. When updating, data (from a file or from a table) is read in its entirety.

@@ -100,21 +106,23 @@ Configuration example:

``` xml
-<hashed />
+<hashed>
+  <preallocate>0</preallocate>
+</hashed>
```

or

``` sql
-LAYOUT(HASHED())
+LAYOUT(HASHED(PREALLOCATE 0))
```

### sparse_hashed {#dicts-external_dicts_dict_layout-sparse_hashed}

Similar to `hashed`, but uses less memory in favor of more CPU usage.

-It will be also preallocated so as `hashed`, note that it is even more significant for `sparse_hashed`.
+It will also be preallocated in the same way as `hashed` (with `preallocate` set to `true`); note that this is even more significant for `sparse_hashed`.
Configuration example:

@@ -124,8 +132,10 @@ Configuration example:

``` xml
<sparse_hashed />
```

+or
+
``` sql
-LAYOUT(SPARSE_HASHED())
+LAYOUT(SPARSE_HASHED([PREALLOCATE 0]))
```

### complex_key_hashed {#complex-key-hashed}

@@ -328,7 +338,7 @@ or

``` sql
LAYOUT(SSD_CACHE(BLOCK_SIZE 4096 FILE_SIZE 16777216 READ_BUFFER_SIZE 1048576
-    PATH /var/lib/clickhouse/clickhouse_dictionaries/test_dict))
+    PATH ./user_files/test_dict))
```

### complex_key_ssd_cache {#complex-key-ssd-cache}

diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
index 081cc5b0b69..04901c1ad57 100644
--- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
+++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
@@ -30,7 +30,7 @@ LIFETIME(300)

Setting `<lifetime>0</lifetime>` (`LIFETIME(0)`) prevents dictionaries from updating.

-You can set a time interval for upgrades, and ClickHouse will choose a uniformly random time within this range. This is necessary in order to distribute the load on the dictionary source when upgrading on a large number of servers.
+You can set a time interval for updates, and ClickHouse will choose a uniformly random time within this range. This is necessary in order to distribute the load on the dictionary source when updating on a large number of servers.

Example of settings:

@@ -54,7 +54,7 @@ LIFETIME(MIN 300 MAX 360)

If `<min>0</min>` and `<max>0</max>`, ClickHouse does not reload the dictionary by timeout. In this case, ClickHouse can reload the dictionary earlier if the dictionary configuration file was changed or the `SYSTEM RELOAD DICTIONARY` command was executed.

-When upgrading the dictionaries, the ClickHouse server applies different logic depending on the type of [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md):
+When updating the dictionaries, the ClickHouse server applies different logic depending on the type of [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md):

- For a text file, it checks the time of modification. If the time differs from the previously recorded time, the dictionary is updated.
- For MySQL source, the time of modification is checked using a `SHOW TABLE STATUS` query (in case of MySQL 8 you need to disable meta-information caching in MySQL by `set global information_schema_stats_expiry=0`).

@@ -86,3 +86,4 @@ SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source wher

...
```

+For `Cache`, `ComplexKeyCache`, `SSDCache`, and `SSDComplexKeyCache` dictionaries both synchronous and asynchronous updates are supported.
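As a worked sketch of the interval form, here is a hypothetical DDL example (the dictionary name, structure, and source are illustrative, not taken from the documentation above):

``` sql
CREATE DICTIONARY example_dict
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(TABLE 'source_table'))
LAYOUT(FLAT())
-- Reload at a uniformly random moment between 300 and 360 seconds,
-- spreading the load on the source across many servers.
LIFETIME(MIN 300 MAX 360);
```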
diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index b7129725820..5a7efa37fd1 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -53,7 +53,7 @@ optional settings are available: or ``` sql -SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated')) +SOURCE(FILE(path './user_files/os.tsv' format 'TabSeparated')) SETTINGS(format_csv_allow_single_quotes = 0) ``` @@ -69,6 +69,8 @@ Types of sources (`source_type`): - [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse) - [MongoDB](#dicts-external_dicts_dict_sources-mongodb) - [Redis](#dicts-external_dicts_dict_sources-redis) + - [Cassandra](#dicts-external_dicts_dict_sources-cassandra) + - [PostgreSQL](#dicts-external_dicts_dict_sources-postgresql) ## Local File {#dicts-external_dicts_dict_sources-local_file} @@ -86,7 +88,7 @@ Example of settings: or ``` sql -SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated')) +SOURCE(FILE(path './user_files/os.tsv' format 'TabSeparated')) ``` Setting fields: diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md index dbf2fa67ac5..c6770b531f4 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md @@ -159,14 +159,14 @@ Configuration fields: | Tag | Description | Required | |------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------| | `name` | Column name. | Yes | -| `type` | ClickHouse data type.
ClickHouse tries to cast value from dictionary to the specified data type. For example, for MySQL, the field might be `TEXT`, `VARCHAR`, or `BLOB` in the MySQL source table, but it can be uploaded as `String` in ClickHouse.
[Nullable](../../../sql-reference/data-types/nullable.md) is not supported. | Yes | -| `null_value` | Default value for a non-existing element.
In the example, it is an empty string. You cannot use `NULL` in this field. | Yes | +| `type` | ClickHouse data type.
ClickHouse tries to cast the value from the dictionary to the specified data type. For example, for MySQL, the field might be `TEXT`, `VARCHAR`, or `BLOB` in the MySQL source table, but it can be uploaded as `String` in ClickHouse.<br/>
[Nullable](../../../sql-reference/data-types/nullable.md) is currently supported for [Flat](external-dicts-dict-layout.md#flat), [Hashed](external-dicts-dict-layout.md#dicts-external_dicts_dict_layout-hashed), [ComplexKeyHashed](external-dicts-dict-layout.md#complex-key-hashed), [Direct](external-dicts-dict-layout.md#direct), [ComplexKeyDirect](external-dicts-dict-layout.md#complex-key-direct), [RangeHashed](external-dicts-dict-layout.md#range-hashed), [Polygon](external-dicts-dict-polygon.md), [Cache](external-dicts-dict-layout.md#cache), [ComplexKeyCache](external-dicts-dict-layout.md#complex-key-cache), [SSDCache](external-dicts-dict-layout.md#ssd-cache), [SSDComplexKeyCache](external-dicts-dict-layout.md#complex-key-ssd-cache) dictionaries. In [IPTrie](external-dicts-dict-layout.md#ip-trie) dictionaries `Nullable` types are not supported. | Yes | +| `null_value` | Default value for a non-existing element.
In the example, it is an empty string. The [NULL](../../syntax.md#null-literal) value can be used only with the `Nullable` types (see the previous line with the type descriptions). | Yes |
| `expression` | [Expression](../../../sql-reference/syntax.md#syntax-expressions) that ClickHouse executes on the value.<br/>
The expression can be a column name in the remote SQL database. Thus, you can use it to create an alias for the remote column.

Default value: no expression. | No | | `hierarchical` | If `true`, the attribute contains the value of a parent key for the current key. See [Hierarchical Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md).

Default value: `false`. | No | | `injective` | Flag that shows whether the `id -> attribute` image is [injective](https://en.wikipedia.org/wiki/Injective_function).
If `true`, ClickHouse can automatically place requests to dictionaries with injective attributes after the `GROUP BY` clause. Usually this significantly reduces the number of such requests.<br/>

Default value: `false`. | No | | `is_object_id` | Flag that shows whether the query is executed for a MongoDB document by `ObjectID`.

Default value: `false`. | No | -## See Also {#see-also} +**See Also** - [Functions for working with external dictionaries](../../../sql-reference/functions/ext-dict-functions.md). diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts.md index 8217fb8da3a..d229336c58d 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts.md @@ -43,7 +43,7 @@ The dictionary configuration file has the following format: You can [configure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md) any number of dictionaries in the same file. -[DDL queries for dictionaries](../../../sql-reference/statements/create/dictionary.md) doesn’t require any additional records in server configuration. They allow to work with dictionaries as first-class entities, like tables or views. +[DDL queries for dictionaries](../../../sql-reference/statements/create/dictionary.md) does not require any additional records in server configuration. They allow to work with dictionaries as first-class entities, like tables or views. !!! attention "Attention" You can convert values for a small dictionary by describing it in a `SELECT` query (see the [transform](../../../sql-reference/functions/other-functions.md) function). This functionality is not related to external dictionaries. diff --git a/docs/en/sql-reference/dictionaries/index.md b/docs/en/sql-reference/dictionaries/index.md index fa127dab103..22f4182a1c0 100644 --- a/docs/en/sql-reference/dictionaries/index.md +++ b/docs/en/sql-reference/dictionaries/index.md @@ -10,8 +10,6 @@ A dictionary is a mapping (`key -> attributes`) that is convenient for various t ClickHouse supports special functions for working with dictionaries that can be used in queries. It is easier and more efficient to use dictionaries with functions than a `JOIN` with reference tables. -[NULL](../../sql-reference/syntax.md#null-literal) values can’t be stored in a dictionary. - ClickHouse supports: - [Built-in dictionaries](../../sql-reference/dictionaries/internal-dicts.md#internal_dicts) with a specific [set of functions](../../sql-reference/functions/ym-dict-functions.md). diff --git a/docs/en/sql-reference/dictionaries/internal-dicts.md b/docs/en/sql-reference/dictionaries/internal-dicts.md index 472351a19a4..9f142c4207d 100644 --- a/docs/en/sql-reference/dictionaries/internal-dicts.md +++ b/docs/en/sql-reference/dictionaries/internal-dicts.md @@ -31,7 +31,7 @@ You can also create these files yourself. The file format is as follows: - region ID (`UInt32`) - parent region ID (`UInt32`) -- region type (`UInt8`): 1 - continent, 3 - country, 4 - federal district, 5 - region, 6 - city; other types don’t have values +- region type (`UInt8`): 1 - continent, 3 - country, 4 - federal district, 5 - region, 6 - city; other types do not have values - population (`UInt32`) — optional column `regions_names_*.txt`: TabSeparated (no header), columns: diff --git a/docs/en/sql-reference/functions/arithmetic-functions.md b/docs/en/sql-reference/functions/arithmetic-functions.md index faa03dfc9d3..3187f13b5b9 100644 --- a/docs/en/sql-reference/functions/arithmetic-functions.md +++ b/docs/en/sql-reference/functions/arithmetic-functions.md @@ -70,7 +70,7 @@ Calculates a number with the reverse sign. The result is always signed. 
## abs(a) {#arithm_func-abs} -Calculates the absolute value of the number (a). That is, if a \< 0, it returns -a. For unsigned types it doesn’t do anything. For signed integer types, it returns an unsigned number. +Calculates the absolute value of the number (a). That is, if a \< 0, it returns -a. For unsigned types it does not do anything. For signed integer types, it returns an unsigned number. ## gcd(a, b) {#gcda-b} diff --git a/docs/en/sql-reference/functions/array-functions.md b/docs/en/sql-reference/functions/array-functions.md index 5e1d9d4ba23..611f620ed9f 100644 --- a/docs/en/sql-reference/functions/array-functions.md +++ b/docs/en/sql-reference/functions/array-functions.md @@ -125,7 +125,7 @@ hasAll(set, subset) - An empty array is a subset of any array. - `Null` processed as a value. -- Order of values in both of arrays doesn’t matter. +- Order of values in both of arrays does not matter. **Examples** @@ -162,7 +162,7 @@ hasAny(array1, array2) **Peculiar properties** - `Null` processed as a value. -- Order of values in both of arrays doesn’t matter. +- Order of values in both of arrays does not matter. **Examples** @@ -245,7 +245,7 @@ Elements set to `NULL` are handled as normal values. Returns the number of elements in the arr array for which func returns something other than 0. If ‘func’ is not specified, it returns the number of non-zero elements in the array. -Note that the `arrayCount` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument. +Note that the `arrayCount` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument. ## countEqual(arr, x) {#countequalarr-x} @@ -602,7 +602,7 @@ SELECT arraySort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; └────────────────────┘ ``` -Here, the elements that are passed in the second array (\[2, 1\]) define a sorting key for the corresponding element from the source array (\[‘hello’, ‘world’\]), that is, \[‘hello’ –\> 2, ‘world’ –\> 1\]. Since the lambda function doesn’t use `x`, actual values of the source array don’t affect the order in the result. So, ‘hello’ will be the second element in the result, and ‘world’ will be the first. +Here, the elements that are passed in the second array (\[2, 1\]) define a sorting key for the corresponding element from the source array (\[‘hello’, ‘world’\]), that is, \[‘hello’ –\> 2, ‘world’ –\> 1\]. Since the lambda function does not use `x`, actual values of the source array do not affect the order in the result. So, ‘hello’ will be the second element in the result, and ‘world’ will be the first. Other examples are shown below. @@ -1229,7 +1229,7 @@ SELECT arrayReverseFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5, └────────────────────────────────────┘ ``` -Note that the `arrayReverseFilter` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You must pass a lambda function to it as the first argument, and it can’t be omitted. +Note that the `arrayReverseFill` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You must pass a lambda function to it as the first argument, and it can’t be omitted. 
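A small sketch of the fill direction, with expected output shown in comments (derived from the behavior described above):

``` sql
SELECT
    arrayFill(x -> not isNull(x), [1, NULL, 3]) AS filled,            -- [1,1,3]: copies from the left
    arrayReverseFill(x -> not isNull(x), [1, NULL, 3]) AS rev_filled; -- [1,3,3]: copies from the right
```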
## arraySplit(func, arr1, …) {#array-split}

@@ -1293,7 +1293,7 @@ Note that the `arrayFirstIndex` is a [higher-order function](../../sql-reference

## arrayMin {#array-min}

-Returns the minimum of elements in the source array.
+Returns the minimum of elements in the source array.

If the `func` function is specified, returns the minimum of elements converted by this function.

@@ -1312,9 +1312,9 @@ arrayMin([func,] arr)

**Returned value**

-- The minimum of function values (or the array minimum).
+- The minimum of function values (or the array minimum).

-Type: if `func` is specified, matches `func` return value type, else matches the array elements type.
+Type: if `func` is specified, matches `func` return value type, else matches the array elements type.

**Examples**

@@ -1348,7 +1348,7 @@ Result:

## arrayMax {#array-max}

-Returns the maximum of elements in the source array.
+Returns the maximum of elements in the source array.

If the `func` function is specified, returns the maximum of elements converted by this function.

@@ -1367,9 +1367,9 @@ arrayMax([func,] arr)

**Returned value**

-- The maximum of function values (or the array maximum).
+- The maximum of function values (or the array maximum).

-Type: if `func` is specified, matches `func` return value type, else matches the array elements type.
+Type: if `func` is specified, matches `func` return value type, else matches the array elements type.

**Examples**

@@ -1403,7 +1403,7 @@ Result:

## arraySum {#array-sum}

-Returns the sum of elements in the source array.
+Returns the sum of elements in the source array.

If the `func` function is specified, returns the sum of elements converted by this function.

@@ -1418,7 +1418,7 @@ arraySum([func,] arr)

**Arguments**

- `func` — Function. [Expression](../../sql-reference/data-types/special-data-types/expression.md).
-- `arr` — Array. [Array](../../sql-reference/data-types/array.md).
+- `arr` — Array. [Array](../../sql-reference/data-types/array.md).

**Returned value**

@@ -1458,7 +1458,7 @@ Result:

## arrayAvg {#array-avg}

-Returns the average of elements in the source array.
+Returns the average of elements in the source array.

If the `func` function is specified, returns the average of elements converted by this function.

@@ -1473,7 +1473,7 @@ arrayAvg([func,] arr)

**Arguments**

- `func` — Function. [Expression](../../sql-reference/data-types/special-data-types/expression.md).
-- `arr` — Array. [Array](../../sql-reference/data-types/array.md).
+- `arr` — Array. [Array](../../sql-reference/data-types/array.md).

**Returned value**

@@ -1544,3 +1544,52 @@ SELECT arrayCumSumNonNegative([1, 1, -4, 1]) AS res
```

Note that the `arrayCumSumNonNegative` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument.
+## arrayProduct {#arrayproduct}
+
+Multiplies elements of an [array](../../sql-reference/data-types/array.md).
+
+**Syntax**
+
+``` sql
+arrayProduct(arr)
+```
+
+**Arguments**
+
+- `arr` — [Array](../../sql-reference/data-types/array.md) of numeric values.
+
+**Returned value**
+
+- The product of the array's elements.
+
+Type: [Float64](../../sql-reference/data-types/float.md).
+ +**Examples** + +Query: + +``` sql +SELECT arrayProduct([1,2,3,4,5,6]) as res; +``` + +Result: + +``` text +┌─res───┐ +│ 720 │ +└───────┘ +``` + +Query: + +``` sql +SELECT arrayProduct([toDecimal64(1,8), toDecimal64(2,8), toDecimal64(3,8)]) as res, toTypeName(res); +``` + +Return value type is always [Float64](../../sql-reference/data-types/float.md). Result: + +``` text +┌─res─┬─toTypeName(arrayProduct(array(toDecimal64(1, 8), toDecimal64(2, 8), toDecimal64(3, 8))))─┐ +│ 6 │ Float64 │ +└─────┴──────────────────────────────────────────────────────────────────────────────────────────┘ +``` diff --git a/docs/en/sql-reference/functions/array-join.md b/docs/en/sql-reference/functions/array-join.md index f35e0d10117..e87d0bca4bb 100644 --- a/docs/en/sql-reference/functions/array-join.md +++ b/docs/en/sql-reference/functions/array-join.md @@ -7,7 +7,7 @@ toc_title: arrayJoin This is a very unusual function. -Normal functions don’t change a set of rows, but just change the values in each row (map). +Normal functions do not change a set of rows, but just change the values in each row (map). Aggregate functions compress a set of rows (fold or reduce). The ‘arrayJoin’ function takes each row and generates a set of rows (unfold). diff --git a/docs/en/sql-reference/functions/bit-functions.md b/docs/en/sql-reference/functions/bit-functions.md index 31d09e48e01..57e55a7da56 100644 --- a/docs/en/sql-reference/functions/bit-functions.md +++ b/docs/en/sql-reference/functions/bit-functions.md @@ -228,7 +228,7 @@ bitCount(x) - Number of bits set to one in the input number. -The function doesn’t convert input value to a larger type ([sign extension](https://en.wikipedia.org/wiki/Sign_extension)). So, for example, `bitCount(toUInt8(-1)) = 8`. +The function does not convert input value to a larger type ([sign extension](https://en.wikipedia.org/wiki/Sign_extension)). So, for example, `bitCount(toUInt8(-1)) = 8`. Type: `UInt8`. @@ -250,3 +250,53 @@ Result: └───────────────┘ ``` +## bitHammingDistance {#bithammingdistance} + +Returns the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) between the bit representations of two integer values. Can be used with [SimHash](../../sql-reference/functions/hash-functions.md#ngramsimhash) functions for detection of semi-duplicate strings. The smaller is the distance, the more likely those strings are the same. + +**Syntax** + +``` sql +bitHammingDistance(int1, int2) +``` + +**Arguments** + +- `int1` — First integer value. [Int64](../../sql-reference/data-types/int-uint.md). +- `int2` — Second integer value. [Int64](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- The Hamming distance. + +Type: [UInt8](../../sql-reference/data-types/int-uint.md). 
+
+**Examples**
+
+Query:
+
+``` sql
+SELECT bitHammingDistance(111, 121);
+```
+
+Result:
+
+``` text
+┌─bitHammingDistance(111, 121)─┐
+│ 3 │
+└──────────────────────────────┘
+```
+
+With [SimHash](../../sql-reference/functions/hash-functions.md#ngramsimhash):
+
+``` sql
+SELECT bitHammingDistance(ngramSimHash('cat ate rat'), ngramSimHash('rat ate cat'));
+```
+
+Result:
+
+``` text
+┌─bitHammingDistance(ngramSimHash('cat ate rat'), ngramSimHash('rat ate cat'))─┐
+│ 5 │
+└──────────────────────────────────────────────────────────────────────────────┘
+```

diff --git a/docs/en/sql-reference/functions/bitmap-functions.md b/docs/en/sql-reference/functions/bitmap-functions.md
index 7ec400949e9..c695c894784 100644
--- a/docs/en/sql-reference/functions/bitmap-functions.md
+++ b/docs/en/sql-reference/functions/bitmap-functions.md
@@ -33,7 +33,7 @@ SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res);

``` text
┌─res─┬─toTypeName(bitmapBuild([1, 2, 3, 4, 5]))─────┐
-│  │ AggregateFunction(groupBitmap, UInt8) │
+│ │ AggregateFunction(groupBitmap, UInt8) │
└─────┴──────────────────────────────────────────────┘
```

@@ -140,7 +140,7 @@ bitmapContains(haystack, needle)

**Returned values**

-- 0 — If `haystack` doesn’t contain `needle`.
+- 0 — If `haystack` does not contain `needle`.
- 1 — If `haystack` contains `needle`.

Type: `UInt8`.

diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md
index 6b26dae4546..afbaed2b413 100644
--- a/docs/en/sql-reference/functions/date-time-functions.md
+++ b/docs/en/sql-reference/functions/date-time-functions.md
@@ -5,7 +5,7 @@ toc_title: Dates and Times

# Functions for Working with Dates and Times {#functions-for-working-with-dates-and-times}

-Support for time zones
+Support for time zones.

All functions for working with the date and time that have a logical use for the time zone can accept a second optional time zone argument. Example: Asia/Yekaterinburg. In this case, they use the specified time zone instead of the local (default) one.

``` sql
SELECT
@@ -23,13 +23,53 @@ SELECT
└─────────────────────┴────────────┴────────────┴─────────────────────┘
```

+## timeZone {#timezone}
+
+Returns the timezone of the server.
+
+**Syntax**
+
+``` sql
+timeZone()
+```
+
+Alias: `timezone`.
+
+**Returned value**
+
+- Timezone.
+
+Type: [String](../../sql-reference/data-types/string.md).
+
## toTimeZone {#totimezone}

-Convert time or date and time to the specified time zone. The time zone is an attribute of the Date/DateTime types. The internal value (number of seconds) of the table field or of the resultset's column does not change, the column's type changes and its string representation changes accordingly.
+Converts time or date and time to the specified time zone. The time zone is an attribute of the `Date` and `DateTime` data types. The internal value (number of seconds) of the table field or of the resultset's column does not change, the column's type changes and its string representation changes accordingly.
+
+**Syntax**
+
+``` sql
+toTimeZone(value, timezone)
+```
+
+Alias: `toTimezone`.
+
+**Arguments**
+
+- `value` — Time or date and time. [DateTime64](../../sql-reference/data-types/datetime64.md).
+- `timezone` — Timezone for the returned value. [String](../../sql-reference/data-types/string.md).
+
+**Returned value**
+
+- Date and time.
+
+Type: [DateTime](../../sql-reference/data-types/datetime.md).
+ +**Example** + +Query: ```sql -SELECT - toDateTime('2019-01-01 00:00:00', 'UTC') AS time_utc, +SELECT toDateTime('2019-01-01 00:00:00', 'UTC') AS time_utc, toTypeName(time_utc) AS type_utc, toInt32(time_utc) AS int32utc, toTimeZone(time_utc, 'Asia/Yekaterinburg') AS time_yekat, @@ -40,6 +80,7 @@ SELECT toInt32(time_samoa) AS int32samoa FORMAT Vertical; ``` +Result: ```text Row 1: @@ -57,6 +98,82 @@ int32samoa: 1546300800 `toTimeZone(time_utc, 'Asia/Yekaterinburg')` changes the `DateTime('UTC')` type to `DateTime('Asia/Yekaterinburg')`. The value (Unixtimestamp) 1546300800 stays the same, but the string representation (the result of the toString() function) changes from `time_utc: 2019-01-01 00:00:00` to `time_yekat: 2019-01-01 05:00:00`. +## timeZoneOf {#timezoneof} + +Returns the timezone name of [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md) data types. + +**Syntax** + +``` sql +timeZoneOf(value) +``` + +Alias: `timezoneOf`. + +**Arguments** + +- `value` — Date and time. [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md). + +**Returned value** + +- Timezone name. + +Type: [String](../../sql-reference/data-types/string.md). + +**Example** + +Query: +``` sql +SELECT timezoneOf(now()); +``` + +Result: +``` text +┌─timezoneOf(now())─┐ +│ Etc/UTC │ +└───────────────────┘ +``` + +## timeZoneOffset {#timezoneoffset} + +Returns a timezone offset in seconds from [UTC](https://en.wikipedia.org/wiki/Coordinated_Universal_Time). The function takes into account [daylight saving time](https://en.wikipedia.org/wiki/Daylight_saving_time) and historical timezone changes at the specified date and time. +[IANA timezone database](https://www.iana.org/time-zones) is used to calculate the offset. + +**Syntax** + +``` sql +timeZoneOffset(value) +``` + +Alias: `timezoneOffset`. + +**Arguments** + +- `value` — Date and time. [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md). + +**Returned value** + +- Offset from UTC in seconds. + +Type: [Int32](../../sql-reference/data-types/int-uint.md). + +**Example** + +Query: + +``` sql +SELECT toDateTime('2021-04-21 10:20:30', 'America/New_York') AS Time, toTypeName(Time) AS Type, + timeZoneOffset(Time) AS Offset_in_seconds, (Offset_in_seconds / 3600) AS Offset_in_hours; +``` + +Result: + +``` text +┌────────────────Time─┬─Type─────────────────────────┬─Offset_in_seconds─┬─Offset_in_hours─┐ +│ 2021-04-21 10:20:30 │ DateTime('America/New_York') │ -14400 │ -4 │ +└─────────────────────┴──────────────────────────────┴───────────────────┴─────────────────┘ +``` + ## toYear {#toyear} Converts a date or date with time to a UInt16 number containing the year number (AD). @@ -147,6 +264,9 @@ Result: └────────────────┘ ``` +!!! attention "Attention" + The return type `toStartOf*` functions described below is `Date` or `DateTime`. Though these functions can take `DateTime64` as an argument, passing them a `DateTime64` that is out of normal range (years 1970 - 2105) will give incorrect result. + ## toStartOfYear {#tostartofyear} Rounds down a date or date with time to the first day of the year. @@ -340,7 +460,7 @@ For mode values with a meaning of “with 4 or more days this year,” weeks are - Otherwise, it is the last week of the previous year, and the next week is week 1. -For mode values with a meaning of “contains January 1”, the week contains January 1 is week 1. 
It doesn’t matter how many days in the new year the week contained, even if it contained only one day. +For mode values with a meaning of “contains January 1”, the week contains January 1 is week 1. It does not matter how many days in the new year the week contained, even if it contained only one day. ``` sql toWeek(date, [, mode][, Timezone]) @@ -388,13 +508,13 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d Truncates date and time data to the specified part of date. -**Syntax** +**Syntax** ``` sql date_trunc(unit, value[, timezone]) ``` -Alias: `dateTrunc`. +Alias: `dateTrunc`. **Arguments** @@ -457,13 +577,13 @@ Result: Adds the time interval or date interval to the provided date or date with time. -**Syntax** +**Syntax** ``` sql date_add(unit, value, date) ``` -Aliases: `dateAdd`, `DATE_ADD`. +Aliases: `dateAdd`, `DATE_ADD`. **Arguments** @@ -478,7 +598,7 @@ Aliases: `dateAdd`, `DATE_ADD`. - `month` - `quarter` - `year` - + - `value` — Value of interval to add. [Int](../../sql-reference/data-types/int-uint.md). - `date` — The date or date with time to which `value` is added. [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md). @@ -583,7 +703,7 @@ Aliases: `dateSub`, `DATE_SUB`. - `month` - `quarter` - `year` - + - `value` — Value of interval to subtract. [Int](../../sql-reference/data-types/int-uint.md). - `date` — The date or date with time from which `value` is subtracted. [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md). @@ -613,16 +733,16 @@ Result: Adds the specified time value with the provided date or date time value. -**Syntax** +**Syntax** ``` sql timestamp_add(date, INTERVAL value unit) ``` -Aliases: `timeStampAdd`, `TIMESTAMP_ADD`. +Aliases: `timeStampAdd`, `TIMESTAMP_ADD`. **Arguments** - + - `date` — Date or date with time. [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md). - `value` — Value of interval to add. [Int](../../sql-reference/data-types/int-uint.md). - `unit` — The type of interval to add. [String](../../sql-reference/data-types/string.md). @@ -642,7 +762,7 @@ Aliases: `timeStampAdd`, `TIMESTAMP_ADD`. Date or date with time with the specified `value` expressed in `unit` added to `date`. Type: [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md). - + **Example** Query: @@ -663,13 +783,13 @@ Result: Subtracts the time interval from the provided date or date with time. -**Syntax** +**Syntax** ``` sql timestamp_sub(unit, value, date) ``` -Aliases: `timeStampSub`, `TIMESTAMP_SUB`. +Aliases: `timeStampSub`, `TIMESTAMP_SUB`. **Arguments** @@ -684,7 +804,7 @@ Aliases: `timeStampSub`, `TIMESTAMP_SUB`. - `month` - `quarter` - `year` - + - `value` — Value of interval to subtract. [Int](../../sql-reference/data-types/int-uint.md). - `date` — Date or date with time. [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md). @@ -709,12 +829,12 @@ Result: │ 2018-07-18 01:02:03 │ └──────────────────────────────────────────────────────────────┘ ``` - + ## now {#now} -Returns the current date and time. +Returns the current date and time. 
-**Syntax** +**Syntax** ``` sql now([timezone]) @@ -1069,4 +1189,3 @@ Result: │ 2020-01-01 │ └────────────────────────────────────┘ ``` - diff --git a/docs/en/sql-reference/functions/encoding-functions.md b/docs/en/sql-reference/functions/encoding-functions.md index 6b72d3c2269..167afdabb80 100644 --- a/docs/en/sql-reference/functions/encoding-functions.md +++ b/docs/en/sql-reference/functions/encoding-functions.md @@ -87,8 +87,6 @@ The function is using uppercase letters `A-F` and not using any prefixes (like ` For integer arguments, it prints hex digits (“nibbles”) from the most significant to least significant (big endian or “human readable” order). It starts with the most significant non-zero byte (leading zero bytes are omitted) but always prints both digits of every byte even if leading digit is zero. -Example: - **Example** Query: @@ -151,10 +149,62 @@ Result: └──────────────────┘ ``` -## unhex(str) {#unhexstr} +## unhex {#unhexstr} -Accepts a string containing any number of hexadecimal digits, and returns a string containing the corresponding bytes. Supports both uppercase and lowercase letters A-F. The number of hexadecimal digits does not have to be even. If it is odd, the last digit is interpreted as the least significant half of the 00-0F byte. If the argument string contains anything other than hexadecimal digits, some implementation-defined result is returned (an exception isn’t thrown). -If you want to convert the result to a number, you can use the ‘reverse’ and ‘reinterpretAsType’ functions. +Performs the opposite operation of [hex](#hex). It interprets each pair of hexadecimal digits (in the argument) as a number and converts it to the byte represented by the number. The return value is a binary string (BLOB). + +If you want to convert the result to a number, you can use the [reverse](../../sql-reference/functions/string-functions.md#reverse) and [reinterpretAs](../../sql-reference/functions/type-conversion-functions.md#type-conversion-functions) functions. + +!!! note "Note" + If `unhex` is invoked from within the `clickhouse-client`, binary strings display using UTF-8. + +Alias: `UNHEX`. + +**Syntax** + +``` sql +unhex(arg) +``` + +**Arguments** + +- `arg` — A string containing any number of hexadecimal digits. Type: [String](../../sql-reference/data-types/string.md). + +Supports both uppercase and lowercase letters `A-F`. The number of hexadecimal digits does not have to be even. If it is odd, the last digit is interpreted as the least significant half of the `00-0F` byte. If the argument string contains anything other than hexadecimal digits, some implementation-defined result is returned (an exception isn’t thrown). For a numeric argument the inverse of hex(N) is not performed by unhex(). + +**Returned value** + +- A binary string (BLOB). + +Type: [String](../../sql-reference/data-types/string.md). + +**Example** + +Query: +``` sql +SELECT unhex('303132'), UNHEX('4D7953514C'); +``` + +Result: +``` text +┌─unhex('303132')─┬─unhex('4D7953514C')─┐ +│ 012 │ MySQL │ +└─────────────────┴─────────────────────┘ +``` + +Query: + +``` sql +SELECT reinterpretAsUInt64(reverse(unhex('FFF'))) AS num; +``` + +Result: + +``` text +┌──num─┐ +│ 4095 │ +└──────┘ +``` ## UUIDStringToNum(str) {#uuidstringtonumstr} @@ -171,4 +221,3 @@ Accepts an integer. Returns a string containing the list of powers of two that t ## bitmaskToArray(num) {#bitmasktoarraynum} Accepts an integer. Returns an array of UInt64 numbers containing the list of powers of two that total the source number when summed. 
Numbers in the array are in ascending order.
-
diff --git a/docs/en/sql-reference/functions/ext-dict-functions.md b/docs/en/sql-reference/functions/ext-dict-functions.md
index 5fc146f603f..7c0fe11ae64 100644
--- a/docs/en/sql-reference/functions/ext-dict-functions.md
+++ b/docs/en/sql-reference/functions/ext-dict-functions.md
@@ -4,27 +4,28 @@ toc_title: External Dictionaries
 ---

 !!! attention "Attention"
-    `dict_name` parameter must be fully qualified for dictionaries created with DDL queries. Eg. `<database>.<dict_name>`.
+    For dictionaries created with [DDL queries](../../sql-reference/statements/create/dictionary.md), the `dict_name` parameter must be fully specified, like `<database>.<dict_name>`. Otherwise, the current database is used.

 # Functions for Working with External Dictionaries {#ext_dict_functions}

 For information on connecting and configuring external dictionaries, see [External dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).

-## dictGet {#dictget}
+## dictGet, dictGetOrDefault, dictGetOrNull {#dictget}

-Retrieves a value from an external dictionary.
+Retrieves values from an external dictionary.

 ``` sql
-dictGet('dict_name', 'attr_name', id_expr)
-dictGetOrDefault('dict_name', 'attr_name', id_expr, default_value_expr)
+dictGet('dict_name', attr_names, id_expr)
+dictGetOrDefault('dict_name', attr_names, id_expr, default_value_expr)
+dictGetOrNull('dict_name', attr_name, id_expr)
 ```

 **Arguments**

 - `dict_name` — Name of the dictionary. [String literal](../../sql-reference/syntax.md#syntax-string-literal).
-- `attr_name` — Name of the column of the dictionary. [String literal](../../sql-reference/syntax.md#syntax-string-literal).
+- `attr_names` — Name of the column of the dictionary, [String literal](../../sql-reference/syntax.md#syntax-string-literal), or tuple of column names, [Tuple](../../sql-reference/data-types/tuple.md)([String literal](../../sql-reference/syntax.md#syntax-string-literal)).
 - `id_expr` — Key value. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning a [UInt64](../../sql-reference/data-types/int-uint.md) or [Tuple](../../sql-reference/data-types/tuple.md)-type value depending on the dictionary configuration.
-- `default_value_expr` — Value returned if the dictionary doesn’t contain a row with the `id_expr` key. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning the value in the data type configured for the `attr_name` attribute.
+- `default_value_expr` — Values returned if the dictionary does not contain a row with the `id_expr` key. [Expression](../../sql-reference/syntax.md#syntax-expressions) or [Tuple](../../sql-reference/data-types/tuple.md)([Expression](../../sql-reference/syntax.md#syntax-expressions)), returning the value (or values) in the data types configured for the `attr_names` attribute.

 **Returned value**

@@ -34,12 +35,13 @@ dictGetOrDefault('dict_name', 'attr_name', id_expr, default_value_expr)

 - `dictGet` returns the content of the `<null_value>` element specified for the attribute in the dictionary configuration.
 - `dictGetOrDefault` returns the value passed as the `default_value_expr` parameter.
+    - `dictGetOrNull` returns `NULL` in case the key was not found in the dictionary.

-ClickHouse throws an exception if it cannot parse the value of the attribute or the value doesn’t match the attribute data type.
+ClickHouse throws an exception if it cannot parse the value of the attribute or the value does not match the attribute data type.
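+
+Assuming a dictionary named `country_dict` with a `String` attribute `name` exists (the dictionary and attribute names here are illustrative assumptions, not taken from the examples below), the three variants can be compared side by side:
+
+``` sql
+SELECT
+    dictGet('country_dict', 'name', toUInt64(1)) AS name,
+    dictGetOrDefault('country_dict', 'name', toUInt64(0), 'unknown') AS name_or_default,
+    dictGetOrNull('country_dict', 'name', toUInt64(0)) AS name_or_null;
+```
+
+If the key `0` is not in the dictionary, `dictGetOrDefault` returns `'unknown'` and `dictGetOrNull` returns `NULL`, while plain `dictGet` would return the attribute's configured `<null_value>`.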
-**Example**
+**Example for simple key dictionary**

-Create a text file `ext-dict-text.csv` containing the following:
+Create a text file `ext-dict-test.csv` containing the following:

 ``` text
 1,1
@@ -96,6 +98,130 @@ LIMIT 3
 └─────┴────────┘
 ```

+**Example for complex key dictionary**
+
+Create a text file `ext-dict-mult.csv` containing the following:
+
+``` text
+1,1,'1'
+2,2,'2'
+3,3,'3'
+```
+
+The first column is `id`, the second is `c1`, the third is `c2`.
+
+Configure the external dictionary:
+
+``` xml
+<yandex>
+    <dictionary>
+        <name>ext-dict-mult</name>
+        <source>
+            <file>
+                <path>/path-to/ext-dict-mult.csv</path>
+                <format>CSV</format>
+            </file>
+        </source>
+        <layout>
+            <flat />
+        </layout>
+        <structure>
+            <id>
+                <name>id</name>
+            </id>
+            <attribute>
+                <name>c1</name>
+                <type>UInt32</type>
+                <null_value></null_value>
+            </attribute>
+            <attribute>
+                <name>c2</name>
+                <type>String</type>
+                <null_value></null_value>
+            </attribute>
+        </structure>
+        <lifetime>0</lifetime>
+    </dictionary>
+</yandex>
+```
+
+Perform the query:
+
+``` sql
+SELECT
+    dictGet('ext-dict-mult', ('c1','c2'), number) AS val,
+    toTypeName(val) AS type
+FROM system.numbers
+LIMIT 3;
+```
+
+``` text
+┌─val─────┬─type──────────────────┐
+│ (1,'1') │ Tuple(UInt8, String)  │
+│ (2,'2') │ Tuple(UInt8, String)  │
+│ (3,'3') │ Tuple(UInt8, String)  │
+└─────────┴───────────────────────┘
+```
+
+**Example for range key dictionary**
+
+Input table:
+
+```sql
+CREATE TABLE range_key_dictionary_source_table
+(
+    key UInt64,
+    start_date Date,
+    end_date Date,
+    value String,
+    value_nullable Nullable(String)
+)
+ENGINE = TinyLog();
+
+INSERT INTO range_key_dictionary_source_table VALUES(1, toDate('2019-05-20'), toDate('2019-05-20'), 'First', 'First');
+INSERT INTO range_key_dictionary_source_table VALUES(2, toDate('2019-05-20'), toDate('2019-05-20'), 'Second', NULL);
+INSERT INTO range_key_dictionary_source_table VALUES(3, toDate('2019-05-20'), toDate('2019-05-20'), 'Third', 'Third');
+```
+
+Create the external dictionary:
+
+```sql
+CREATE DICTIONARY range_key_dictionary
+(
+    key UInt64,
+    start_date Date,
+    end_date Date,
+    value String,
+    value_nullable Nullable(String)
+)
+PRIMARY KEY key
+SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'range_key_dictionary_source_table'))
+LIFETIME(MIN 1 MAX 1000)
+LAYOUT(RANGE_HASHED())
+RANGE(MIN start_date MAX end_date);
+```
+
+Perform the query:
+
+``` sql
+SELECT
+    (number, toDate('2019-05-20')),
+    dictHas('range_key_dictionary', number, toDate('2019-05-20')),
+    dictGetOrNull('range_key_dictionary', 'value', number, toDate('2019-05-20')),
+    dictGetOrNull('range_key_dictionary', 'value_nullable', number, toDate('2019-05-20')),
+    dictGetOrNull('range_key_dictionary', ('value', 'value_nullable'), number, toDate('2019-05-20'))
+FROM system.numbers LIMIT 5 FORMAT TabSeparated;
+```
+
+Result:
+
+``` text
+(0,'2019-05-20')	0	\N	\N	(NULL,NULL)
+(1,'2019-05-20')	1	First	First	('First','First')
+(2,'2019-05-20')	0	\N	\N	(NULL,NULL)
+(3,'2019-05-20')	0	\N	\N	(NULL,NULL)
+(4,'2019-05-20')	0	\N	\N	(NULL,NULL)
+```
+
 **See Also**

 - [External Dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md)
@@ -162,6 +288,119 @@ dictIsIn('dict_name', child_id_expr, ancestor_id_expr)

 Type: `UInt8`.

+## dictGetChildren {#dictgetchildren}
+
+Returns first-level children as an array of indexes. It is the inverse transformation for [dictGetHierarchy](#dictgethierarchy).
+
+**Syntax**
+
+``` sql
+dictGetChildren(dict_name, key)
+```
+
+**Arguments**
+
+- `dict_name` — Name of the dictionary. [String literal](../../sql-reference/syntax.md#syntax-string-literal).
+- `key` — Key value. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning a [UInt64](../../sql-reference/data-types/int-uint.md)-type value.
+
+**Returned values**
+
+- First-level descendants for the key. 
+ +Type: [Array](../../sql-reference/data-types/array.md)([UInt64](../../sql-reference/data-types/int-uint.md)). + +**Example** + +Consider the hierarchic dictionary: + +``` text +┌─id─┬─parent_id─┐ +│ 1 │ 0 │ +│ 2 │ 1 │ +│ 3 │ 1 │ +│ 4 │ 2 │ +└────┴───────────┘ +``` + +First-level children: + +``` sql +SELECT dictGetChildren('hierarchy_flat_dictionary', number) FROM system.numbers LIMIT 4; +``` + +``` text +┌─dictGetChildren('hierarchy_flat_dictionary', number)─┐ +│ [1] │ +│ [2,3] │ +│ [4] │ +│ [] │ +└──────────────────────────────────────────────────────┘ +``` + +## dictGetDescendant {#dictgetdescendant} + +Returns all descendants as if [dictGetChildren](#dictgetchildren) function was applied `level` times recursively. + +**Syntax** + +``` sql +dictGetDescendants(dict_name, key, level) +``` + +**Arguments** + +- `dict_name` — Name of the dictionary. [String literal](../../sql-reference/syntax.md#syntax-string-literal). +- `key` — Key value. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning a [UInt64](../../sql-reference/data-types/int-uint.md)-type value. +- `level` — Hierarchy level. If `level = 0` returns all descendants to the end. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned values** + +- Descendants for the key. + +Type: [Array](../../sql-reference/data-types/array.md)([UInt64](../../sql-reference/data-types/int-uint.md)). + +**Example** + +Consider the hierarchic dictionary: + +``` text +┌─id─┬─parent_id─┐ +│ 1 │ 0 │ +│ 2 │ 1 │ +│ 3 │ 1 │ +│ 4 │ 2 │ +└────┴───────────┘ +``` +All descendants: + +``` sql +SELECT dictGetDescendants('hierarchy_flat_dictionary', number) FROM system.numbers LIMIT 4; +``` + +``` text +┌─dictGetDescendants('hierarchy_flat_dictionary', number)─┐ +│ [1,2,3,4] │ +│ [2,3,4] │ +│ [4] │ +│ [] │ +└─────────────────────────────────────────────────────────┘ +``` + +First-level descendants: + +``` sql +SELECT dictGetDescendants('hierarchy_flat_dictionary', number, 1) FROM system.numbers LIMIT 4; +``` + +``` text +┌─dictGetDescendants('hierarchy_flat_dictionary', number, 1)─┐ +│ [1] │ +│ [2,3] │ +│ [4] │ +│ [] │ +└────────────────────────────────────────────────────────────┘ +``` + ## Other Functions {#ext_dict_functions-other} ClickHouse supports specialized functions that convert dictionary attribute values to a specific data type regardless of the dictionary configuration. @@ -190,7 +429,7 @@ dictGet[Type]OrDefault('dict_name', 'attr_name', id_expr, default_value_expr) - `dict_name` — Name of the dictionary. [String literal](../../sql-reference/syntax.md#syntax-string-literal). - `attr_name` — Name of the column of the dictionary. [String literal](../../sql-reference/syntax.md#syntax-string-literal). - `id_expr` — Key value. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning a [UInt64](../../sql-reference/data-types/int-uint.md) or [Tuple](../../sql-reference/data-types/tuple.md)-type value depending on the dictionary configuration. -- `default_value_expr` — Value returned if the dictionary doesn’t contain a row with the `id_expr` key. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning the value in the data type configured for the `attr_name` attribute. +- `default_value_expr` — Value returned if the dictionary does not contain a row with the `id_expr` key. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning the value in the data type configured for the `attr_name` attribute. 
**Returned value**

@@ -201,5 +440,4 @@ dictGet[Type]OrDefault('dict_name', 'attr_name', id_expr, default_value_expr)

 - `dictGet[Type]` returns the content of the `<null_value>` element specified for the attribute in the dictionary configuration.
 - `dictGet[Type]OrDefault` returns the value passed as the `default_value_expr` parameter.

-ClickHouse throws an exception if it cannot parse the value of the attribute or the value doesn’t match the attribute data type.
-
+ClickHouse throws an exception if it cannot parse the value of the attribute or the value does not match the attribute data type.
diff --git a/docs/en/sql-reference/functions/files.md b/docs/en/sql-reference/functions/files.md
new file mode 100644
index 00000000000..9cbf8932465
--- /dev/null
+++ b/docs/en/sql-reference/functions/files.md
@@ -0,0 +1,35 @@
+---
+toc_priority: 43
+toc_title: Files
+---
+
+# Functions for Working with Files {#functions-for-working-with-files}
+
+## file {#file}
+
+Reads a file as a String. The file content is not parsed, so any information is read as one string and placed into the specified column.
+
+**Syntax**
+
+``` sql
+file(path)
+```
+
+**Arguments**
+
+- `path` — The relative path to the file from [user_files_path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). The path to the file supports the following wildcards: `*`, `?`, `{abc,def}` and `{N..M}`, where `N`, `M` — numbers, `'abc', 'def'` — strings.
+
+**Example**
+
+Inserting data from files `a.txt` and `b.txt` into a table as strings:
+
+Query:
+
+``` sql
+INSERT INTO table SELECT file('a.txt'), file('b.txt');
+```
+
+**See Also**
+
+- [user_files_path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path)
+- [file](../table-functions/file.md)
diff --git a/docs/en/sql-reference/functions/functions-for-nulls.md b/docs/en/sql-reference/functions/functions-for-nulls.md
index 5cc95fe298a..c06711b3cd2 100644
--- a/docs/en/sql-reference/functions/functions-for-nulls.md
+++ b/docs/en/sql-reference/functions/functions-for-nulls.md
@@ -224,7 +224,7 @@ assumeNotNull(x)
 **Returned values**

 - The original value from the non-`Nullable` type, if it is not `NULL`.
-- The default value for the non-`Nullable` type if the original value was `NULL`.
+- Implementation-specific result if the original value was `NULL`.

 **Example**
diff --git a/docs/en/sql-reference/functions/hash-functions.md b/docs/en/sql-reference/functions/hash-functions.md
index 945ede4927f..35a42c49a41 100644
--- a/docs/en/sql-reference/functions/hash-functions.md
+++ b/docs/en/sql-reference/functions/hash-functions.md
@@ -7,6 +7,8 @@ toc_title: Hash

 Hash functions can be used for the deterministic pseudo-random shuffling of elements.

+Simhash is a hash function which returns close hash values for close (similar) arguments.
+
 ## halfMD5 {#hash-functions-halfmd5}

 [Interprets](../../sql-reference/functions/type-conversion-functions.md#type_conversion_functions-reinterpretAsString) all the input parameters as strings and calculates the [MD5](https://en.wikipedia.org/wiki/MD5) hash value for each of them. Then combines hashes, takes the first 8 bytes of the hash of the resulting string, and interprets them as `UInt64` in big-endian byte order.
@@ -41,7 +43,7 @@ SELECT halfMD5(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')

 ## MD5 {#hash_functions-md5}

 Calculates the MD5 from a string and returns the resulting set of bytes as FixedString(16). 
-If you don’t need MD5 in particular, but you need a decent cryptographic 128-bit hash, use the ‘sipHash128’ function instead. +If you do not need MD5 in particular, but you need a decent cryptographic 128-bit hash, use the ‘sipHash128’ function instead. If you want to get the same result as output by the md5sum utility, use lower(hex(MD5(s))). ## sipHash64 {#hash_functions-siphash64} @@ -435,13 +437,13 @@ A [FixedString(16)](../../sql-reference/data-types/fixedstring.md) data type has **Example** ``` sql -SELECT murmurHash3_128('example_string') AS MurmurHash3, toTypeName(MurmurHash3) AS type; +SELECT hex(murmurHash3_128('example_string')) AS MurmurHash3, toTypeName(MurmurHash3) AS type; ``` ``` text -┌─MurmurHash3──────┬─type────────────┐ -│ 6�1�4"S5KT�~~q │ FixedString(16) │ -└──────────────────┴─────────────────┘ +┌─MurmurHash3──────────────────────┬─type───┐ +│ 368A1A311CB7342253354B548E7E7E71 │ String │ +└──────────────────────────────────┴────────┘ ``` ## xxHash32, xxHash64 {#hash-functions-xxhash32} @@ -482,3 +484,938 @@ Result: - [xxHash](http://cyan4973.github.io/xxHash/). +## ngramSimHash {#ngramsimhash} + +Splits a ASCII string into n-grams of `ngramsize` symbols and returns the n-gram `simhash`. Is case sensitive. + +Can be used for detection of semi-duplicate strings with [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). The smaller is the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) of the calculated `simhashes` of two strings, the more likely these strings are the same. + +**Syntax** + +``` sql +ngramSimHash(string[, ngramsize]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Hash value. + +Type: [UInt64](../../sql-reference/data-types/int-uint.md). + +**Example** + +Query: + +``` sql +SELECT ngramSimHash('ClickHouse') AS Hash; +``` + +Result: + +``` text +┌───────Hash─┐ +│ 1627567969 │ +└────────────┘ +``` + +## ngramSimHashCaseInsensitive {#ngramsimhashcaseinsensitive} + +Splits a ASCII string into n-grams of `ngramsize` symbols and returns the n-gram `simhash`. Is case insensitive. + +Can be used for detection of semi-duplicate strings with [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). The smaller is the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) of the calculated `simhashes` of two strings, the more likely these strings are the same. + +**Syntax** + +``` sql +ngramSimHashCaseInsensitive(string[, ngramsize]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Hash value. + +Type: [UInt64](../../sql-reference/data-types/int-uint.md). + +**Example** + +Query: + +``` sql +SELECT ngramSimHashCaseInsensitive('ClickHouse') AS Hash; +``` + +Result: + +``` text +┌──────Hash─┐ +│ 562180645 │ +└───────────┘ +``` + +## ngramSimHashUTF8 {#ngramsimhashutf8} + +Splits a UTF-8 string into n-grams of `ngramsize` symbols and returns the n-gram `simhash`. Is case sensitive. 
+ +Can be used for detection of semi-duplicate strings with [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). The smaller is the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) of the calculated `simhashes` of two strings, the more likely these strings are the same. + +**Syntax** + +``` sql +ngramSimHashUTF8(string[, ngramsize]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Hash value. + +Type: [UInt64](../../sql-reference/data-types/int-uint.md). + +**Example** + +Query: + +``` sql +SELECT ngramSimHashUTF8('ClickHouse') AS Hash; +``` + +Result: + +``` text +┌───────Hash─┐ +│ 1628157797 │ +└────────────┘ +``` + +## ngramSimHashCaseInsensitiveUTF8 {#ngramsimhashcaseinsensitiveutf8} + +Splits a UTF-8 string into n-grams of `ngramsize` symbols and returns the n-gram `simhash`. Is case insensitive. + +Can be used for detection of semi-duplicate strings with [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). The smaller is the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) of the calculated `simhashes` of two strings, the more likely these strings are the same. + +**Syntax** + +``` sql +ngramSimHashCaseInsensitiveUTF8(string[, ngramsize]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Hash value. + +Type: [UInt64](../../sql-reference/data-types/int-uint.md). + +**Example** + +Query: + +``` sql +SELECT ngramSimHashCaseInsensitiveUTF8('ClickHouse') AS Hash; +``` + +Result: + +``` text +┌───────Hash─┐ +│ 1636742693 │ +└────────────┘ +``` + +## wordShingleSimHash {#wordshinglesimhash} + +Splits a ASCII string into parts (shingles) of `shinglesize` words and returns the word shingle `simhash`. Is case sensitive. + +Can be used for detection of semi-duplicate strings with [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). The smaller is the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) of the calculated `simhashes` of two strings, the more likely these strings are the same. + +**Syntax** + +``` sql +wordShingleSimHash(string[, shinglesize]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Hash value. + +Type: [UInt64](../../sql-reference/data-types/int-uint.md). + +**Example** + +Query: + +``` sql +SELECT wordShingleSimHash('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash; +``` + +Result: + +``` text +┌───────Hash─┐ +│ 2328277067 │ +└────────────┘ +``` + +## wordShingleSimHashCaseInsensitive {#wordshinglesimhashcaseinsensitive} + +Splits a ASCII string into parts (shingles) of `shinglesize` words and returns the word shingle `simhash`. Is case insensitive. 
+ +Can be used for detection of semi-duplicate strings with [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). The smaller is the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) of the calculated `simhashes` of two strings, the more likely these strings are the same. + +**Syntax** + +``` sql +wordShingleSimHashCaseInsensitive(string[, shinglesize]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Hash value. + +Type: [UInt64](../../sql-reference/data-types/int-uint.md). + +**Example** + +Query: + +``` sql +SELECT wordShingleSimHashCaseInsensitive('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash; +``` + +Result: + +``` text +┌───────Hash─┐ +│ 2194812424 │ +└────────────┘ +``` + +## wordShingleSimHashUTF8 {#wordshinglesimhashutf8} + +Splits a UTF-8 string into parts (shingles) of `shinglesize` words and returns the word shingle `simhash`. Is case sensitive. + +Can be used for detection of semi-duplicate strings with [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). The smaller is the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) of the calculated `simhashes` of two strings, the more likely these strings are the same. + +**Syntax** + +``` sql +wordShingleSimHashUTF8(string[, shinglesize]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — The size of a word shingle. Optinal. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Hash value. + +Type: [UInt64](../../sql-reference/data-types/int-uint.md). + +**Example** + +Query: + +``` sql +SELECT wordShingleSimHashUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash; +``` + +Result: + +``` text +┌───────Hash─┐ +│ 2328277067 │ +└────────────┘ +``` + +## wordShingleSimHashCaseInsensitiveUTF8 {#wordshinglesimhashcaseinsensitiveutf8} + +Splits a UTF-8 string into parts (shingles) of `shinglesize` words and returns the word shingle `simhash`. Is case insensitive. + +Can be used for detection of semi-duplicate strings with [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). The smaller is the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) of the calculated `simhashes` of two strings, the more likely these strings are the same. + +**Syntax** + +``` sql +wordShingleSimHashCaseInsensitiveUTF8(string[, shinglesize]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Hash value. + +Type: [UInt64](../../sql-reference/data-types/int-uint.md). 
+ +**Example** + +Query: + +``` sql +SELECT wordShingleSimHashCaseInsensitiveUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash; +``` + +Result: + +``` text +┌───────Hash─┐ +│ 2194812424 │ +└────────────┘ +``` + +## ngramMinHash {#ngramminhash} + +Splits a ASCII string into n-grams of `ngramsize` symbols and calculates hash values for each n-gram. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. Is case sensitive. + +Can be used for detection of semi-duplicate strings with [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). For two strings: if one of the returned hashes is the same for both strings, we think that those strings are the same. + +**Syntax** + +``` sql +ngramMinHash(string[, ngramsize, hashnum]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Tuple with two hashes — the minimum and the maximum. + +Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)). + +**Example** + +Query: + +``` sql +SELECT ngramMinHash('ClickHouse') AS Tuple; +``` + +Result: + +``` text +┌─Tuple──────────────────────────────────────┐ +│ (18333312859352735453,9054248444481805918) │ +└────────────────────────────────────────────┘ +``` + +## ngramMinHashCaseInsensitive {#ngramminhashcaseinsensitive} + +Splits a ASCII string into n-grams of `ngramsize` symbols and calculates hash values for each n-gram. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. Is case insensitive. + +Can be used for detection of semi-duplicate strings with [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). For two strings: if one of the returned hashes is the same for both strings, we think that those strings are the same. + +**Syntax** + +``` sql +ngramMinHashCaseInsensitive(string[, ngramsize, hashnum]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Tuple with two hashes — the minimum and the maximum. + +Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)). 
+ +**Example** + +Query: + +``` sql +SELECT ngramMinHashCaseInsensitive('ClickHouse') AS Tuple; +``` + +Result: + +``` text +┌─Tuple──────────────────────────────────────┐ +│ (2106263556442004574,13203602793651726206) │ +└────────────────────────────────────────────┘ +``` + +## ngramMinHashUTF8 {#ngramminhashutf8} + +Splits a UTF-8 string into n-grams of `ngramsize` symbols and calculates hash values for each n-gram. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. Is case sensitive. + +Can be used for detection of semi-duplicate strings with [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). For two strings: if one of the returned hashes is the same for both strings, we think that those strings are the same. + +**Syntax** + +``` sql +ngramMinHashUTF8(string[, ngramsize, hashnum]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Tuple with two hashes — the minimum and the maximum. + +Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)). + +**Example** + +Query: + +``` sql +SELECT ngramMinHashUTF8('ClickHouse') AS Tuple; +``` + +Result: + +``` text +┌─Tuple──────────────────────────────────────┐ +│ (18333312859352735453,6742163577938632877) │ +└────────────────────────────────────────────┘ +``` + +## ngramMinHashCaseInsensitiveUTF8 {#ngramminhashcaseinsensitiveutf8} + +Splits a UTF-8 string into n-grams of `ngramsize` symbols and calculates hash values for each n-gram. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. Is case insensitive. + +Can be used for detection of semi-duplicate strings with [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). For two strings: if one of the returned hashes is the same for both strings, we think that those strings are the same. + +**Syntax** + +``` sql +ngramMinHashCaseInsensitiveUTF8(string [, ngramsize, hashnum]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Tuple with two hashes — the minimum and the maximum. + +Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)). 
+ +**Example** + +Query: + +``` sql +SELECT ngramMinHashCaseInsensitiveUTF8('ClickHouse') AS Tuple; +``` + +Result: + +``` text +┌─Tuple───────────────────────────────────────┐ +│ (12493625717655877135,13203602793651726206) │ +└─────────────────────────────────────────────┘ +``` + +## ngramMinHashArg {#ngramminhasharg} + +Splits a ASCII string into n-grams of `ngramsize` symbols and returns the n-grams with minimum and maximum hashes, calculated by the [ngramMinHash](#ngramminhash) function with the same input. Is case sensitive. + +**Syntax** + +``` sql +ngramMinHashArg(string[, ngramsize, hashnum]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Tuple with two tuples with `hashnum` n-grams each. + +Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))). + +**Example** + +Query: + +``` sql +SELECT ngramMinHashArg('ClickHouse') AS Tuple; +``` + +Result: + +``` text +┌─Tuple─────────────────────────────────────────────────────────────────────────┐ +│ (('ous','ick','lic','Hou','kHo','use'),('Hou','lic','ick','ous','ckH','Cli')) │ +└───────────────────────────────────────────────────────────────────────────────┘ +``` + +## ngramMinHashArgCaseInsensitive {#ngramminhashargcaseinsensitive} + +Splits a ASCII string into n-grams of `ngramsize` symbols and returns the n-grams with minimum and maximum hashes, calculated by the [ngramMinHashCaseInsensitive](#ngramminhashcaseinsensitive) function with the same input. Is case insensitive. + +**Syntax** + +``` sql +ngramMinHashArgCaseInsensitive(string[, ngramsize, hashnum]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Tuple with two tuples with `hashnum` n-grams each. + +Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))). 
+ +**Example** + +Query: + +``` sql +SELECT ngramMinHashArgCaseInsensitive('ClickHouse') AS Tuple; +``` + +Result: + +``` text +┌─Tuple─────────────────────────────────────────────────────────────────────────┐ +│ (('ous','ick','lic','kHo','use','Cli'),('kHo','lic','ick','ous','ckH','Hou')) │ +└───────────────────────────────────────────────────────────────────────────────┘ +``` + +## ngramMinHashArgUTF8 {#ngramminhashargutf8} + +Splits a UTF-8 string into n-grams of `ngramsize` symbols and returns the n-grams with minimum and maximum hashes, calculated by the [ngramMinHashUTF8](#ngramminhashutf8) function with the same input. Is case sensitive. + +**Syntax** + +``` sql +ngramMinHashArgUTF8(string[, ngramsize, hashnum]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Tuple with two tuples with `hashnum` n-grams each. + +Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))). + +**Example** + +Query: + +``` sql +SELECT ngramMinHashArgUTF8('ClickHouse') AS Tuple; +``` + +Result: + +``` text +┌─Tuple─────────────────────────────────────────────────────────────────────────┐ +│ (('ous','ick','lic','Hou','kHo','use'),('kHo','Hou','lic','ick','ous','ckH')) │ +└───────────────────────────────────────────────────────────────────────────────┘ +``` + +## ngramMinHashArgCaseInsensitiveUTF8 {#ngramminhashargcaseinsensitiveutf8} + +Splits a UTF-8 string into n-grams of `ngramsize` symbols and returns the n-grams with minimum and maximum hashes, calculated by the [ngramMinHashCaseInsensitiveUTF8](#ngramminhashcaseinsensitiveutf8) function with the same input. Is case insensitive. + +**Syntax** + +``` sql +ngramMinHashArgCaseInsensitiveUTF8(string[, ngramsize, hashnum]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Tuple with two tuples with `hashnum` n-grams each. + +Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))). 
+ +**Example** + +Query: + +``` sql +SELECT ngramMinHashArgCaseInsensitiveUTF8('ClickHouse') AS Tuple; +``` + +Result: + +``` text +┌─Tuple─────────────────────────────────────────────────────────────────────────┐ +│ (('ckH','ous','ick','lic','kHo','use'),('kHo','lic','ick','ous','ckH','Hou')) │ +└───────────────────────────────────────────────────────────────────────────────┘ +``` + +## wordShingleMinHash {#wordshingleminhash} + +Splits a ASCII string into parts (shingles) of `shinglesize` words and calculates hash values for each word shingle. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. Is case sensitive. + +Can be used for detection of semi-duplicate strings with [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). For two strings: if one of the returned hashes is the same for both strings, we think that those strings are the same. + +**Syntax** + +``` sql +wordShingleMinHash(string[, shinglesize, hashnum]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Tuple with two hashes — the minimum and the maximum. + +Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)). + +**Example** + +Query: + +``` sql +SELECT wordShingleMinHash('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple; +``` + +Result: + +``` text +┌─Tuple──────────────────────────────────────┐ +│ (16452112859864147620,5844417301642981317) │ +└────────────────────────────────────────────┘ +``` + +## wordShingleMinHashCaseInsensitive {#wordshingleminhashcaseinsensitive} + +Splits a ASCII string into parts (shingles) of `shinglesize` words and calculates hash values for each word shingle. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. Is case insensitive. + +Can be used for detection of semi-duplicate strings with [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). For two strings: if one of the returned hashes is the same for both strings, we think that those strings are the same. + +**Syntax** + +``` sql +wordShingleMinHashCaseInsensitive(string[, shinglesize, hashnum]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Tuple with two hashes — the minimum and the maximum. 
+ +Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)). + +**Example** + +Query: + +``` sql +SELECT wordShingleMinHashCaseInsensitive('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple; +``` + +Result: + +``` text +┌─Tuple─────────────────────────────────────┐ +│ (3065874883688416519,1634050779997673240) │ +└───────────────────────────────────────────┘ +``` + +## wordShingleMinHashUTF8 {#wordshingleminhashutf8} + +Splits a UTF-8 string into parts (shingles) of `shinglesize` words and calculates hash values for each word shingle. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. Is case sensitive. + +Can be used for detection of semi-duplicate strings with [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). For two strings: if one of the returned hashes is the same for both strings, we think that those strings are the same. + +**Syntax** + +``` sql +wordShingleMinHashUTF8(string[, shinglesize, hashnum]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Tuple with two hashes — the minimum and the maximum. + +Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)). + +**Example** + +Query: + +``` sql +SELECT wordShingleMinHashUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple; +``` + +Result: + +``` text +┌─Tuple──────────────────────────────────────┐ +│ (16452112859864147620,5844417301642981317) │ +└────────────────────────────────────────────┘ +``` + +## wordShingleMinHashCaseInsensitiveUTF8 {#wordshingleminhashcaseinsensitiveutf8} + +Splits a UTF-8 string into parts (shingles) of `shinglesize` words and calculates hash values for each word shingle. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. Is case insensitive. + +Can be used for detection of semi-duplicate strings with [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). For two strings: if one of the returned hashes is the same for both strings, we think that those strings are the same. + +**Syntax** + +``` sql +wordShingleMinHashCaseInsensitiveUTF8(string[, shinglesize, hashnum]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. 
[UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Tuple with two hashes — the minimum and the maximum. + +Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)). + +**Example** + +Query: + +``` sql +SELECT wordShingleMinHashCaseInsensitiveUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple; +``` + +Result: + +``` text +┌─Tuple─────────────────────────────────────┐ +│ (3065874883688416519,1634050779997673240) │ +└───────────────────────────────────────────┘ +``` + +## wordShingleMinHashArg {#wordshingleminhasharg} + +Splits a ASCII string into parts (shingles) of `shinglesize` words each and returns the shingles with minimum and maximum word hashes, calculated by the [wordshingleMinHash](#wordshingleminhash) function with the same input. Is case sensitive. + +**Syntax** + +``` sql +wordShingleMinHashArg(string[, shinglesize, hashnum]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Tuple with two tuples with `hashnum` word shingles each. + +Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))). + +**Example** + +Query: + +``` sql +SELECT wordShingleMinHashArg('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple; +``` + +Result: + +``` text +┌─Tuple─────────────────────────────────────────────────────────────────┐ +│ (('OLAP','database','analytical'),('online','oriented','processing')) │ +└───────────────────────────────────────────────────────────────────────┘ +``` + +## wordShingleMinHashArgCaseInsensitive {#wordshingleminhashargcaseinsensitive} + +Splits a ASCII string into parts (shingles) of `shinglesize` words each and returns the shingles with minimum and maximum word hashes, calculated by the [wordShingleMinHashCaseInsensitive](#wordshingleminhashcaseinsensitive) function with the same input. Is case insensitive. + +**Syntax** + +``` sql +wordShingleMinHashArgCaseInsensitive(string[, shinglesize, hashnum]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Tuple with two tuples with `hashnum` word shingles each. 
+ +Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))). + +**Example** + +Query: + +``` sql +SELECT wordShingleMinHashArgCaseInsensitive('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple; +``` + +Result: + +``` text +┌─Tuple──────────────────────────────────────────────────────────────────┐ +│ (('queries','database','analytical'),('oriented','processing','DBMS')) │ +└────────────────────────────────────────────────────────────────────────┘ +``` + +## wordShingleMinHashArgUTF8 {#wordshingleminhashargutf8} + +Splits a UTF-8 string into parts (shingles) of `shinglesize` words each and returns the shingles with minimum and maximum word hashes, calculated by the [wordShingleMinHashUTF8](#wordshingleminhashutf8) function with the same input. Is case sensitive. + +**Syntax** + +``` sql +wordShingleMinHashArgUTF8(string[, shinglesize, hashnum]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Tuple with two tuples with `hashnum` word shingles each. + +Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))). + +**Example** + +Query: + +``` sql +SELECT wordShingleMinHashArgUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple; +``` + +Result: + +``` text +┌─Tuple─────────────────────────────────────────────────────────────────┐ +│ (('OLAP','database','analytical'),('online','oriented','processing')) │ +└───────────────────────────────────────────────────────────────────────┘ +``` + +## wordShingleMinHashArgCaseInsensitiveUTF8 {#wordshingleminhashargcaseinsensitiveutf8} + +Splits a UTF-8 string into parts (shingles) of `shinglesize` words each and returns the shingles with minimum and maximum word hashes, calculated by the [wordShingleMinHashCaseInsensitiveUTF8](#wordshingleminhashcaseinsensitiveutf8) function with the same input. Is case insensitive. + +**Syntax** + +``` sql +wordShingleMinHashArgCaseInsensitiveUTF8(string[, shinglesize, hashnum]) +``` + +**Arguments** + +- `string` — String. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Tuple with two tuples with `hashnum` word shingles each. 
+ +Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))). + +**Example** + +Query: + +``` sql +SELECT wordShingleMinHashArgCaseInsensitiveUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple; +``` + +Result: + +``` text +┌─Tuple──────────────────────────────────────────────────────────────────┐ +│ (('queries','database','analytical'),('oriented','processing','DBMS')) │ +└────────────────────────────────────────────────────────────────────────┘ +``` diff --git a/docs/en/sql-reference/functions/index.md b/docs/en/sql-reference/functions/index.md index 32408759b98..58e0994a11d 100644 --- a/docs/en/sql-reference/functions/index.md +++ b/docs/en/sql-reference/functions/index.md @@ -6,7 +6,7 @@ toc_title: Introduction # Functions {#functions} -There are at least\* two types of functions - regular functions (they are just called “functions”) and aggregate functions. These are completely different concepts. Regular functions work as if they are applied to each row separately (for each row, the result of the function doesn’t depend on the other rows). Aggregate functions accumulate a set of values from various rows (i.e. they depend on the entire set of rows). +There are at least\* two types of functions - regular functions (they are just called “functions”) and aggregate functions. These are completely different concepts. Regular functions work as if they are applied to each row separately (for each row, the result of the function does not depend on the other rows). Aggregate functions accumulate a set of values from various rows (i.e. they depend on the entire set of rows). In this section we discuss regular functions. For aggregate functions, see the section “Aggregate functions”. @@ -14,7 +14,7 @@ In this section we discuss regular functions. For aggregate functions, see the s ## Strong Typing {#strong-typing} -In contrast to standard SQL, ClickHouse has strong typing. In other words, it doesn’t make implicit conversions between types. Each function works for a specific set of types. This means that sometimes you need to use type conversion functions. +In contrast to standard SQL, ClickHouse has strong typing. In other words, it does not make implicit conversions between types. Each function works for a specific set of types. This means that sometimes you need to use type conversion functions. ## Common Subexpression Elimination {#common-subexpression-elimination} @@ -78,7 +78,7 @@ For example, in the query `SELECT f(sum(g(x))) FROM distributed_table GROUP BY h - if a `distributed_table` has at least two shards, the functions ‘g’ and ‘h’ are performed on remote servers, and the function ‘f’ is performed on the requestor server. - if a `distributed_table` has only one shard, all the ‘f’, ‘g’, and ‘h’ functions are performed on this shard’s server. -The result of a function usually doesn’t depend on which server it is performed on. However, sometimes this is important. +The result of a function usually does not depend on which server it is performed on. However, sometimes this is important. For example, functions that work with dictionaries use the dictionary that exists on the server they are running on. 
Another example is the `hostName` function, which returns the name of the server it is running on in order to make `GROUP BY` by servers in a `SELECT` query.
diff --git a/docs/en/sql-reference/functions/ip-address-functions.md b/docs/en/sql-reference/functions/ip-address-functions.md
index a24cd464946..137ebc2407d 100644
--- a/docs/en/sql-reference/functions/ip-address-functions.md
+++ b/docs/en/sql-reference/functions/ip-address-functions.md
@@ -48,7 +48,7 @@ LIMIT 10
└────────────────┴───────┘
```

-Since using ‘xxx’ is highly unusual, this may be changed in the future. We recommend that you don’t rely on the exact format of this fragment.
+Since using ‘xxx’ is highly unusual, this may be changed in the future. We recommend that you do not rely on the exact format of this fragment.

### IPv6NumToString(x) {#ipv6numtostringx}

@@ -394,3 +394,55 @@ Result:
└──────────────────┴────────────────────┘
```

+## isIPAddressInRange {#isipaddressinrange}
+
+Determines if an IP address is contained in a network represented in the [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation. Returns `1` if true, or `0` otherwise.
+
+**Syntax**
+
+``` sql
+isIPAddressInRange(address, prefix)
+```
+
+This function accepts both IPv4 and IPv6 addresses (and networks) represented as strings. It returns `0` if the IP version of the address and the CIDR do not match.
+
+**Arguments**
+
+- `address` — An IPv4 or IPv6 address. [String](../../sql-reference/data-types/string.md).
+- `prefix` — An IPv4 or IPv6 network prefix in CIDR. [String](../../sql-reference/data-types/string.md).
+
+**Returned value**
+
+- `1` or `0`.
+
+Type: [UInt8](../../sql-reference/data-types/int-uint.md).
+
+**Examples**
+
+Query:
+
+``` sql
+SELECT isIPAddressInRange('127.0.0.1', '127.0.0.0/8');
+```
+
+Result:
+
+``` text
+┌─isIPAddressInRange('127.0.0.1', '127.0.0.0/8')─┐
+│ 1 │
+└────────────────────────────────────────────────┘
+```
+
+Query:
+
+``` sql
+SELECT isIPAddressInRange('127.0.0.1', 'ffff::/16');
+```
+
+Result:
+
+``` text
+┌─isIPAddressInRange('127.0.0.1', 'ffff::/16')─┐
+│ 0 │
+└──────────────────────────────────────────────┘
+```
diff --git a/docs/en/sql-reference/functions/json-functions.md b/docs/en/sql-reference/functions/json-functions.md
index ca6ef684faf..e731180c393 100644
--- a/docs/en/sql-reference/functions/json-functions.md
+++ b/docs/en/sql-reference/functions/json-functions.md
@@ -12,50 +12,64 @@ The following assumptions are made:

1. The field name (function argument) must be a constant.
2. The field name is somehow canonically encoded in JSON. For example: `visitParamHas('{"abc":"def"}', 'abc') = 1`, but `visitParamHas('{"\\u0061\\u0062\\u0063":"def"}', 'abc') = 0`
3. Fields are searched for on any nesting level, indiscriminately. If there are multiple matching fields, the first occurrence is used.
-4. The JSON doesn’t have space characters outside of string literals.
+4. The JSON does not have space characters outside of string literals.

## visitParamHas(params, name) {#visitparamhasparams-name}

-Checks whether there is a field with the ‘name’ name.
+Checks whether there is a field with the `name` name.
+
+Alias: `simpleJSONHas`.

## visitParamExtractUInt(params, name) {#visitparamextractuintparams-name}

-Parses UInt64 from the value of the field named ‘name’. If this is a string field, it tries to parse a number from the beginning of the string. If the field doesn’t exist, or it exists but doesn’t contain a number, it returns 0.
+Parses UInt64 from the value of the field named `name`.
If this is a string field, it tries to parse a number from the beginning of the string. If the field does not exist, or it exists but does not contain a number, it returns 0.
+
+Alias: `simpleJSONExtractUInt`.

## visitParamExtractInt(params, name) {#visitparamextractintparams-name}

The same as for Int64.

+Alias: `simpleJSONExtractInt`.
+
## visitParamExtractFloat(params, name) {#visitparamextractfloatparams-name}

The same as for Float64.

+Alias: `simpleJSONExtractFloat`.
+
## visitParamExtractBool(params, name) {#visitparamextractboolparams-name}

Parses a true/false value. The result is UInt8.

+Alias: `simpleJSONExtractBool`.
+
## visitParamExtractRaw(params, name) {#visitparamextractrawparams-name}

Returns the value of a field, including separators.

+Alias: `simpleJSONExtractRaw`.
+
Examples:

``` sql
-visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"'
-visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}'
+visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"';
+visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}';
```

## visitParamExtractString(params, name) {#visitparamextractstringparams-name}

Parses the string in double quotes. The value is unescaped. If unescaping failed, it returns an empty string.

+Alias: `simpleJSONExtractString`.
+
Examples:

``` sql
-visitParamExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0'
-visitParamExtractString('{"abc":"\\u263a"}', 'abc') = '☺'
-visitParamExtractString('{"abc":"\\u263"}', 'abc') = ''
-visitParamExtractString('{"abc":"hello}', 'abc') = ''
+visitParamExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0';
+visitParamExtractString('{"abc":"\\u263a"}', 'abc') = '☺';
+visitParamExtractString('{"abc":"\\u263"}', 'abc') = '';
+visitParamExtractString('{"abc":"hello}', 'abc') = '';
```

There is currently no support for code points in the format `\uXXXX\uYYYY` that are not from the basic multilingual plane (they are converted to CESU-8 instead of UTF-8).

@@ -92,7 +106,7 @@ SELECT JSONHas('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 4) = 0

- Positive integer = access the n-th member/key from the beginning.
- Negative integer = access the n-th member/key from the end.

-Minimum index of the element is 1. Thus the element 0 doesn’t exist.
+Minimum index of the element is 1. Thus the element 0 does not exist.

You may use integers to access both JSON arrays and JSON objects.

diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md
index c40aa3d1eae..8163650efab 100644
--- a/docs/en/sql-reference/functions/other-functions.md
+++ b/docs/en/sql-reference/functions/other-functions.md
@@ -696,10 +696,6 @@ Returns the server’s uptime in seconds.

Returns the version of the server as a string.

-## timezone() {#timezone}
-
-Returns the timezone of the server.
-
## blockNumber {#blocknumber}

Returns the sequence number of the data block where the row is located.

@@ -1192,6 +1188,109 @@ SELECT defaultValueOfTypeName('Nullable(Int8)')
└──────────────────────────────────────────┘
```

+## indexHint {#indexhint}
+
+The function is intended for debugging and introspection purposes. The function ignores its argument and always returns 1. Arguments are not even evaluated.
+
+But for the purpose of index analysis, the argument of this function is analyzed as if it were present directly, without being wrapped inside the `indexHint` function.
This allows selecting data in index ranges by the corresponding condition, but without further filtering by this condition. The index in ClickHouse is sparse and using `indexHint` will yield more data than specifying the same condition directly.
+
+**Syntax**
+
+```sql
+SELECT * FROM table WHERE indexHint(<expression>)
+```
+
+**Returned value**
+
+- Returns `1`. Type: [UInt8](../../sql-reference/data-types/int-uint.md).
+
+**Example**
+
+The examples below use test data from the table [ontime](../../getting-started/example-datasets/ontime.md).
+
+Input table:
+
+```sql
+SELECT count() FROM ontime
+```
+
+```text
+┌─count()─┐
+│ 4276457 │
+└─────────┘
+```
+
+The table has indexes on the fields `(FlightDate, (Year, FlightDate))`.
+
+First, run a query that does not use the index.
+
+Query:
+
+```sql
+SELECT FlightDate AS k, count() FROM ontime GROUP BY k ORDER BY k
+```
+
+ClickHouse processed the entire table (`Processed 4.28 million rows`).
+
+Result:
+
+```text
+┌──────────k─┬─count()─┐
+│ 2017-01-01 │ 13970 │
+│ 2017-01-02 │ 15882 │
+........................
+│ 2017-09-28 │ 16411 │
+│ 2017-09-29 │ 16384 │
+│ 2017-09-30 │ 12520 │
+└────────────┴─────────┘
+```
+
+To apply the index, select a specific date.
+
+Query:
+
+```sql
+SELECT FlightDate AS k, count() FROM ontime WHERE k = '2017-09-15' GROUP BY k ORDER BY k
+```
+
+By using the index, ClickHouse processed a significantly smaller number of rows (`Processed 32.74 thousand rows`).
+
+Result:
+
+```text
+┌──────────k─┬─count()─┐
+│ 2017-09-15 │ 16428 │
+└────────────┴─────────┘
+```
+
+Now wrap the expression `k = '2017-09-15'` into the `indexHint` function.
+
+Query:
+
+```sql
+SELECT
+    FlightDate AS k,
+    count()
+FROM ontime
+WHERE indexHint(k = '2017-09-15')
+GROUP BY k
+ORDER BY k ASC
+```
+
+ClickHouse used the index in the same way as the previous time (`Processed 32.74 thousand rows`).
+The expression `k = '2017-09-15'` was not used when generating the result.
+In this example, the `indexHint` function makes it possible to see adjacent dates.
+
+Result:
+
+```text
+┌──────────k─┬─count()─┐
+│ 2017-09-14 │ 7071 │
+│ 2017-09-15 │ 16428 │
+│ 2017-09-16 │ 1077 │
+│ 2017-09-30 │ 8167 │
+└────────────┴─────────┘
+```
+
## replicate {#other-functions-replicate}

Creates an array with a single value.

@@ -1555,7 +1654,7 @@ joinGet(join_storage_table_name, `value_column`, join_keys)

Returns list of values corresponded to list of keys.

-If certain doesn’t exist in source table then `0` or `null` will be returned based on [join_use_nulls](../../operations/settings/settings.md#join_use_nulls) setting.
+If a certain key does not exist in the source table then `0` or `null` will be returned based on [join_use_nulls](../../operations/settings/settings.md#join_use_nulls) setting.

More info about `join_use_nulls` in [Join operation](../../engines/table-engines/special/join.md).

@@ -1615,7 +1714,7 @@ Code: 395. DB::Exception: Received from localhost:9000. DB::Exception: Too many.

## identity {#identity}

-Returns the same value that was used as its argument. Used for debugging and testing, allows to cancel using index, and get the query performance of a full scan. When query is analyzed for possible use of index, the analyzer doesn’t look inside `identity` functions. Also constant folding is not applied too.
+Returns the same value that was used as its argument. Used for debugging and testing, it allows cancelling the use of an index and getting the query performance of a full scan. When a query is analyzed for possible use of an index, the analyzer does not look inside `identity` functions.
Constant folding is not applied either.

**Syntax**

diff --git a/docs/en/sql-reference/functions/rounding-functions.md b/docs/en/sql-reference/functions/rounding-functions.md
index c0bd44a6467..5f74c6329d1 100644
--- a/docs/en/sql-reference/functions/rounding-functions.md
+++ b/docs/en/sql-reference/functions/rounding-functions.md
@@ -14,7 +14,7 @@ Returns the largest round number that is less than or equal to `x`. A round numb
Examples: `floor(123.45, 1) = 123.4, floor(123.45, -1) = 120.`

`x` is any numeric type. The result is a number of the same type.
-For integer arguments, it makes sense to round with a negative `N` value (for non-negative `N`, the function doesn’t do anything).
+For integer arguments, it makes sense to round with a negative `N` value (for non-negative `N`, the function does not do anything).
If rounding causes overflow (for example, floor(-128, -1)), an implementation-specific result is returned.

## ceil(x\[, N\]), ceiling(x\[, N\]) {#ceilx-n-ceilingx-n}

diff --git a/docs/en/sql-reference/functions/splitting-merging-functions.md b/docs/en/sql-reference/functions/splitting-merging-functions.md
index bd7e209549c..2d384f1aa3c 100644
--- a/docs/en/sql-reference/functions/splitting-merging-functions.md
+++ b/docs/en/sql-reference/functions/splitting-merging-functions.md
@@ -13,7 +13,7 @@ Returns an array of selected substrings. Empty substrings may be selected if the

**Syntax**

``` sql
-splitByChar(<separator>, <s>)
+splitByChar(separator, s)
```

**Arguments**

@@ -29,12 +29,12 @@ Returns an array of selected substrings. Empty substrings may be selected when:

- There are multiple consecutive separators;
- The original string `s` is empty.

-Type: [Array](../../sql-reference/data-types/array.md) of [String](../../sql-reference/data-types/string.md).
+Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).

**Example**

``` sql
-SELECT splitByChar(',', '1,2,3,abcde')
+SELECT splitByChar(',', '1,2,3,abcde');
```

``` text
@@ -50,7 +50,7 @@ Splits a string into substrings separated by a string. It uses a constant string

**Syntax**

``` sql
-splitByString(<separator>, <s>)
+splitByString(separator, s)
```

**Arguments**

@@ -62,7 +62,7 @@ splitByString(<separator>, <s>)

Returns an array of selected substrings. Empty substrings may be selected when:

-Type: [Array](../../sql-reference/data-types/array.md) of [String](../../sql-reference/data-types/string.md).
+Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).

- A non-empty separator occurs at the beginning or end of the string;
- There are multiple consecutive non-empty separators;
@@ -71,7 +71,7 @@ Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-ref

**Example**

``` sql
-SELECT splitByString(', ', '1, 2 3, 4,5, abcde')
+SELECT splitByString(', ', '1, 2 3, 4,5, abcde');
```

``` text
@@ -81,7 +81,7 @@ SELECT splitByString(', ', '1, 2 3, 4,5, abcde')
```

``` sql
-SELECT splitByString('', 'abcde')
+SELECT splitByString('', 'abcde');
```

``` text
@@ -90,6 +90,61 @@ SELECT splitByString('', 'abcde')
└────────────────────────────┘
```

+## splitByRegexp(regexp, s) {#splitbyregexpseparator-s}
+
+Splits a string into substrings separated by a regular expression. It uses a regular expression string `regexp` as the separator. If the `regexp` is empty, it will split the string `s` into an array of single characters. If no match is found for this regular expression, the string `s` won't be split.
+
+**Syntax**
+
+``` sql
+splitByRegexp(regexp, s)
+```
+
+**Arguments**
+
+- `regexp` — Regular expression. Constant. [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
+- `s` — The string to split. [String](../../sql-reference/data-types/string.md).
+
+**Returned value(s)**
+
+Returns an array of selected substrings. Empty substrings may be selected when:
+
+- A non-empty regular expression match occurs at the beginning or end of the string;
+- There are multiple consecutive non-empty regular expression matches;
+- The original string `s` is empty while the regular expression is not empty.
+
+Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT splitByRegexp('\\d+', 'a12bc23de345f');
+```
+
+Result:
+
+``` text
+┌─splitByRegexp('\\d+', 'a12bc23de345f')─┐
+│ ['a','bc','de','f'] │
+└────────────────────────────────────────┘
+```
+
+Query:
+
+``` sql
+SELECT splitByRegexp('', 'abcde');
+```
+
+Result:
+
+``` text
+┌─splitByRegexp('', 'abcde')─┐
+│ ['a','b','c','d','e'] │
+└────────────────────────────┘
+```
+
## arrayStringConcat(arr\[, separator\]) {#arraystringconcatarr-separator}

Concatenates the strings listed in the array with the separator.’separator’ is an optional parameter: a constant string, set to an empty string by default.
Returns the string.

## alphaTokens(s) {#alphatokenss}

Selects substrings of consecutive bytes from the ranges a-z and A-Z.Returns an array of substrings.

**Example**

``` sql
-SELECT alphaTokens('abca1abc')
+SELECT alphaTokens('abca1abc');
```

``` text
@@ -149,4 +204,3 @@ Result:
│ [['abc','123'],['8','"hkl"']] │
└───────────────────────────────────────────────────────────────────────┘
```
-
diff --git a/docs/en/sql-reference/functions/string-functions.md b/docs/en/sql-reference/functions/string-functions.md
index 3d3caaf6e23..5074f478bc0 100644
--- a/docs/en/sql-reference/functions/string-functions.md
+++ b/docs/en/sql-reference/functions/string-functions.md
@@ -29,17 +29,17 @@ The function also works for arrays.

## lengthUTF8 {#lengthutf8}

-Returns the length of a string in Unicode code points (not in characters), assuming that the string contains a set of bytes that make up UTF-8 encoded text. If this assumption is not met, it returns some result (it doesn’t throw an exception).
+Returns the length of a string in Unicode code points (not in characters), assuming that the string contains a set of bytes that make up UTF-8 encoded text. If this assumption is not met, it returns some result (it does not throw an exception).
The result type is UInt64.

## char_length, CHAR_LENGTH {#char-length}

-Returns the length of a string in Unicode code points (not in characters), assuming that the string contains a set of bytes that make up UTF-8 encoded text. If this assumption is not met, it returns some result (it doesn’t throw an exception).
+Returns the length of a string in Unicode code points (not in characters), assuming that the string contains a set of bytes that make up UTF-8 encoded text. If this assumption is not met, it returns some result (it does not throw an exception).
The result type is UInt64.

## character_length, CHARACTER_LENGTH {#character-length}

-Returns the length of a string in Unicode code points (not in characters), assuming that the string contains a set of bytes that make up UTF-8 encoded text. If this assumption is not met, it returns some result (it doesn’t throw an exception).
+Returns the length of a string in Unicode code points (not in characters), assuming that the string contains a set of bytes that make up UTF-8 encoded text. If this assumption is not met, it returns some result (it does not throw an exception). The result type is UInt64. ## lower, lcase {#lower} @@ -53,14 +53,14 @@ Converts ASCII Latin symbols in a string to uppercase. ## lowerUTF8 {#lowerutf8} Converts a string to lowercase, assuming the string contains a set of bytes that make up a UTF-8 encoded text. -It doesn’t detect the language. So for Turkish the result might not be exactly correct. +It does not detect the language. So for Turkish the result might not be exactly correct. If the length of the UTF-8 byte sequence is different for upper and lower case of a code point, the result may be incorrect for this code point. If the string contains a set of bytes that is not UTF-8, then the behavior is undefined. ## upperUTF8 {#upperutf8} Converts a string to uppercase, assuming the string contains a set of bytes that make up a UTF-8 encoded text. -It doesn’t detect the language. So for Turkish the result might not be exactly correct. +It does not detect the language. So for Turkish the result might not be exactly correct. If the length of the UTF-8 byte sequence is different for upper and lower case of a code point, the result may be incorrect for this code point. If the string contains a set of bytes that is not UTF-8, then the behavior is undefined. @@ -139,7 +139,7 @@ Reverses the string (as a sequence of bytes). ## reverseUTF8 {#reverseutf8} -Reverses a sequence of Unicode code points, assuming that the string contains a set of bytes representing a UTF-8 text. Otherwise, it does something else (it doesn’t throw an exception). +Reverses a sequence of Unicode code points, assuming that the string contains a set of bytes representing a UTF-8 text. Otherwise, it does something else (it does not throw an exception). ## format(pattern, s0, s1, …) {#format} @@ -264,7 +264,7 @@ Returns a substring starting with the byte from the ‘offset’ index that is ## substringUTF8(s, offset, length) {#substringutf8} -The same as ‘substring’, but for Unicode code points. Works under the assumption that the string contains a set of bytes representing a UTF-8 encoded text. If this assumption is not met, it returns some result (it doesn’t throw an exception). +The same as ‘substring’, but for Unicode code points. Works under the assumption that the string contains a set of bytes representing a UTF-8 encoded text. If this assumption is not met, it returns some result (it does not throw an exception). ## appendTrailingCharIfAbsent(s, c) {#appendtrailingcharifabsent} @@ -305,7 +305,7 @@ SELECT startsWith('Spider-Man', 'Spi'); **Returned values** - 1, if the string starts with the specified prefix. -- 0, if the string doesn’t start with the specified prefix. +- 0, if the string does not start with the specified prefix. **Example** @@ -363,7 +363,7 @@ Result: ## trimLeft {#trimleft} -Removes all consecutive occurrences of common whitespace (ASCII character 32) from the beginning of a string. It doesn’t remove other kinds of whitespace characters (tab, no-break space, etc.). +Removes all consecutive occurrences of common whitespace (ASCII character 32) from the beginning of a string. It does not remove other kinds of whitespace characters (tab, no-break space, etc.). 
**Syntax**

@@ -401,7 +401,7 @@ Result:

## trimRight {#trimright}

-Removes all consecutive occurrences of common whitespace (ASCII character 32) from the end of a string. It doesn’t remove other kinds of whitespace characters (tab, no-break space, etc.).
+Removes all consecutive occurrences of common whitespace (ASCII character 32) from the end of a string. It does not remove other kinds of whitespace characters (tab, no-break space, etc.).

**Syntax**

@@ -439,7 +439,7 @@ Result:

## trimBoth {#trimboth}

-Removes all consecutive occurrences of common whitespace (ASCII character 32) from both ends of a string. It doesn’t remove other kinds of whitespace characters (tab, no-break space, etc.).
+Removes all consecutive occurrences of common whitespace (ASCII character 32) from both ends of a string. It does not remove other kinds of whitespace characters (tab, no-break space, etc.).

**Syntax**

@@ -649,3 +649,65 @@ Result:

- [List of XML and HTML character entity references](https://en.wikipedia.org/wiki/List_of_XML_and_HTML_character_entity_references)

+
+## extractTextFromHTML {#extracttextfromhtml}
+
+A function to extract text from HTML or XHTML.
+It does not necessarily 100% conform to any of the HTML, XML or XHTML standards, but the implementation is reasonably accurate and it is fast. The rules are the following:
+
+1. Comments are skipped. Example: `<!-- test -->`. Comment must end with `-->`. Nested comments are not possible.
+Note: constructions like `<!-->` and `<!--->` are not valid comments in HTML but they are skipped by other rules.
+2. CDATA is pasted verbatim. Note: CDATA is XML/XHTML specific. But it is processed for "best-effort" approach.
+3. `script` and `style` elements are removed with all their content. Note: it is assumed that closing tag cannot appear inside content. For example, in JS string literal has to be escaped like `"<\/script>"`.
+Note: comments and CDATA are possible inside `script` or `style` - then closing tags are not searched inside CDATA. Example: `<script><![CDATA[</script>]]></script>`. But they are still searched inside comments. Sometimes it becomes complicated: `<script>var x = "<!--"; </script> var y = "-->"; alert(x + y);</script>`
+Note: `script` and `style` can be the names of XML namespaces - then they are not treated like usual `script` or `style` elements. Example: `<script:a>Hello</script:a>`.
+Note: whitespaces are possible after closing tag name: `</script >` but not before: `< / script>`.
+4. Other tags or tag-like elements are skipped without inner content. Example: `<a>.</a>`
+Note: it is expected that this HTML is illegal: `<a test=">"></a>`
+Note: it also skips something like tags: `<>`, `<!>`, etc.
+Note: tag without end is skipped to the end of input: `<hello `
+5. HTML and XML entities are not decoded. They must be processed by separate function.
+6. Whitespaces in the text are collapsed or inserted by specific rules.
+Note: whitespaces at the beginning and at the end are removed. Consecutive whitespaces are collapsed. But if the text is separated by other elements and there is no whitespace, it is inserted.
+Note: it may cause unnatural examples: `Hello<b>world</b>`, `Hello<!-- -->world` - there is no whitespace in HTML, but the function inserts it. Also consider: `Hello<p>world</p>`, `Hello<br>world`. This behavior is reasonable for data analysis, e.g. to convert HTML to a bag of words. A short example of this rule follows the list.
+7. Also note that correct handling of whitespaces requires the support of `<pre></pre>` and CSS `display` and `white-space` properties.
+
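+For instance, the whitespace insertion described in rule 6 can be checked directly (a minimal sketch; the expected output below is inferred from the rule rather than copied from reference output):
+
+``` sql
+SELECT extractTextFromHTML('Hello<b>world</b>');
+```
+
+``` text
+Hello world
+```
+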
+**Syntax**
+
+``` sql
+extractTextFromHTML(x)
+```
+
+**Arguments**
+
+-   `x` — Input text. [String](../../sql-reference/data-types/string.md).
+
+**Returned value**
+
+-   Extracted text.
+
+Type: [String](../../sql-reference/data-types/string.md).
+
+**Example**
+
+The first example contains several tags and a comment and also shows whitespace processing.
+The second example shows `CDATA` and `script` tag processing.
+In the third example text is extracted from the full HTML response received by the [url](../../sql-reference/table-functions/url.md) function.
+
+Query:
+
+``` sql
+SELECT extractTextFromHTML(' <p> A text <i>with</i><b>tags</b>. <!-- comments --> </p> ');
+SELECT extractTextFromHTML('<![CDATA[The content within <b>CDATA</b>]]> <script>alert("Script");</script>');
+SELECT extractTextFromHTML(html) FROM url('http://www.donothingfor2minutes.com/', RawBLOB, 'html String');
+```
+
+Result:
+
+``` text
+A text with tags .
+The content within <b>CDATA</b>
+Do Nothing for 2 Minutes 2:00 &nbsp;
+```
diff --git a/docs/en/sql-reference/functions/string-search-functions.md b/docs/en/sql-reference/functions/string-search-functions.md
index 86ed15e368d..551c4aee8f0 100644
--- a/docs/en/sql-reference/functions/string-search-functions.md
+++ b/docs/en/sql-reference/functions/string-search-functions.md
@@ -12,7 +12,9 @@ The search is case-sensitive by default in all these functions. There are separa

## position(haystack, needle), locate(haystack, needle) {#position}

-Returns the position (in bytes) of the found substring in the string, starting from 1.
+Searches for the substring `needle` in the string `haystack`.
+
+Returns the position (in bytes) of the found substring in the string, starting from 1.

For a case-insensitive search, use the function [positionCaseInsensitive](#positioncaseinsensitive).

@@ -20,15 +22,22 @@ For a case-insensitive search, use the function [positionCaseInsensitive](#posit

``` sql
position(haystack, needle[, start_pos])
-``` 
+```
+
+``` sql
+position(needle IN haystack)
+```

Alias: `locate(haystack, needle[, start_pos])`.

+!!! note "Note"
+    Syntax of `position(needle IN haystack)` provides SQL-compatibility; the function works the same way as `position(haystack, needle)`.
+
**Arguments**

- `haystack` — String, in which substring will to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal).
- `needle` — Substring to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal).
-- `start_pos` — Optional parameter, position of the first character in the string to start search. [UInt](../../sql-reference/data-types/int-uint.md).
+- `start_pos` — Position of the first character in the string to start search. [UInt](../../sql-reference/data-types/int-uint.md). Optional.

**Returned values**

@@ -83,11 +92,41 @@ Result:
└───────────────────────────────┘
```

+**Examples for POSITION(needle IN haystack) syntax**
+
+Query:
+
+```sql
+SELECT 3 = position('c' IN 'abc');
+```
+
+Result:
+
+```text
+┌─equals(3, position('abc', 'c'))─┐
+│ 1 │
+└─────────────────────────────────┘
+```
+
+Query:
+
+```sql
+SELECT 6 = position('/' IN s) FROM (SELECT 'Hello/World' AS s);
+```
+
+Result:
+
+```text
+┌─equals(6, position(s, '/'))─┐
+│ 1 │
+└─────────────────────────────┘
+```
+
## positionCaseInsensitive {#positioncaseinsensitive}

The same as [position](#position) returns the position (in bytes) of the found substring in the string, starting from 1. Use the function for a case-insensitive search.

-Works under the assumption that the string contains a set of bytes representing a single-byte encoded text. If this assumption is not met and a character can’t be represented using a single byte, the function doesn’t throw an exception and returns some unexpected result. If character can be represented using two bytes, it will use two bytes and so on.
+Works under the assumption that the string contains a set of bytes representing a single-byte encoded text. If this assumption is not met and a character can’t be represented using a single byte, the function does not throw an exception and returns some unexpected result. If character can be represented using two bytes, it will use two bytes and so on.
**Syntax** @@ -128,7 +167,7 @@ Result: Returns the position (in Unicode points) of the found substring in the string, starting from 1. -Works under the assumption that the string contains a set of bytes representing a UTF-8 encoded text. If this assumption is not met, the function doesn’t throw an exception and returns some unexpected result. If character can be represented using two Unicode points, it will use two and so on. +Works under the assumption that the string contains a set of bytes representing a UTF-8 encoded text. If this assumption is not met, the function does not throw an exception and returns some unexpected result. If character can be represented using two Unicode points, it will use two and so on. For a case-insensitive search, use the function [positionCaseInsensitiveUTF8](#positioncaseinsensitiveutf8). @@ -203,7 +242,7 @@ Result: The same as [positionUTF8](#positionutf8), but is case-insensitive. Returns the position (in Unicode points) of the found substring in the string, starting from 1. -Works under the assumption that the string contains a set of bytes representing a UTF-8 encoded text. If this assumption is not met, the function doesn’t throw an exception and returns some unexpected result. If character can be represented using two Unicode points, it will use two and so on. +Works under the assumption that the string contains a set of bytes representing a UTF-8 encoded text. If this assumption is not met, the function does not throw an exception and returns some unexpected result. If character can be represented using two Unicode points, it will use two and so on. **Syntax** @@ -310,7 +349,7 @@ For a case-insensitive search or/and in UTF-8 format use functions `multiSearchA Checks whether the string matches the `pattern` regular expression. A `re2` regular expression. The [syntax](https://github.com/google/re2/wiki/Syntax) of the `re2` regular expressions is more limited than the syntax of the Perl regular expressions. -Returns 0 if it doesn’t match, or 1 if it matches. +Returns 0 if it does not match, or 1 if it matches. Note that the backslash symbol (`\`) is used for escaping in the regular expression. The same symbol is used for escaping in string literals. So in order to escape the symbol in a regular expression, you must write two backslashes (\\) in a string literal. @@ -352,11 +391,11 @@ The same as `multiFuzzyMatchAny`, but returns the array of all indices in any or ## extract(haystack, pattern) {#extracthaystack-pattern} -Extracts a fragment of a string using a regular expression. If ‘haystack’ doesn’t match the ‘pattern’ regex, an empty string is returned. If the regex doesn’t contain subpatterns, it takes the fragment that matches the entire regex. Otherwise, it takes the fragment that matches the first subpattern. +Extracts a fragment of a string using a regular expression. If ‘haystack’ does not match the ‘pattern’ regex, an empty string is returned. If the regex does not contain subpatterns, it takes the fragment that matches the entire regex. Otherwise, it takes the fragment that matches the first subpattern. ## extractAll(haystack, pattern) {#extractallhaystack-pattern} -Extracts all the fragments of a string using a regular expression. If ‘haystack’ doesn’t match the ‘pattern’ regex, an empty string is returned. Returns an array of strings consisting of all matches to the regex. In general, the behavior is the same as the ‘extract’ function (it takes the first subpattern, or the entire expression if there isn’t a subpattern). 
+Extracts all the fragments of a string using a regular expression. If ‘haystack’ does not match the ‘pattern’ regex, an empty array is returned. Returns an array of strings consisting of all matches to the regex. In general, the behavior is the same as the ‘extract’ function (it takes the first subpattern, or the entire expression if there isn’t a subpattern).

## extractAllGroupsHorizontal {#extractallgroups-horizontal}

@@ -380,7 +419,7 @@ extractAllGroupsHorizontal(haystack, pattern)

- Type: [Array](../../sql-reference/data-types/array.md).

-If `haystack` doesn’t match the `pattern` regex, an array of empty arrays is returned.
+If `haystack` does not match the `pattern` regex, an array of empty arrays is returned.

**Example**

@@ -421,7 +460,7 @@ extractAllGroupsVertical(haystack, pattern)

- Type: [Array](../../sql-reference/data-types/array.md).

-If `haystack` doesn’t match the `pattern` regex, an empty array is returned.
+If `haystack` does not match the `pattern` regex, an empty array is returned.

**Example**

@@ -474,7 +513,7 @@ ilike(haystack, pattern)

**Arguments**

- `haystack` — Input string. [String](../../sql-reference/syntax.md#syntax-string-literal).
-- `pattern` — If `pattern` doesn't contain percent signs or underscores, then the `pattern` only represents the string itself. An underscore (`_`) in `pattern` stands for (matches) any single character. A percent sign (`%`) matches any sequence of zero or more characters.
+- `pattern` — If `pattern` does not contain percent signs or underscores, then the `pattern` only represents the string itself. An underscore (`_`) in `pattern` stands for (matches) any single character. A percent sign (`%`) matches any sequence of zero or more characters.

Some `pattern` examples:

@@ -488,7 +527,7 @@ Some `pattern` examples:

**Returned values**

- True, if the string matches `pattern`.
-- False, if the string doesn't match `pattern`.
+- False, if the string does not match `pattern`.

**Example**

@@ -772,4 +811,3 @@ Result:
│ 2 │
└───────────────────────────────┘
```
-
diff --git a/docs/en/sql-reference/functions/tuple-functions.md b/docs/en/sql-reference/functions/tuple-functions.md
index 884e1ef754f..4189d0feeb5 100644
--- a/docs/en/sql-reference/functions/tuple-functions.md
+++ b/docs/en/sql-reference/functions/tuple-functions.md
@@ -111,4 +111,55 @@ Result:

- [Tuple](../../sql-reference/data-types/tuple.md)

-[Original article](https://clickhouse.tech/docs/en/sql-reference/functions/tuple-functions/)
+## tupleHammingDistance {#tuplehammingdistance}
+
+Returns the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) between two tuples of the same size.
+
+**Syntax**
+
+``` sql
+tupleHammingDistance(tuple1, tuple2)
+```
+
+**Arguments**
+
+- `tuple1` — First tuple. [Tuple](../../sql-reference/data-types/tuple.md).
+- `tuple2` — Second tuple. [Tuple](../../sql-reference/data-types/tuple.md).
+
+Tuples should have elements of the same type.
+
+**Returned value**
+
+- The Hamming distance.
+
+Type: [UInt8](../../sql-reference/data-types/int-uint.md).
+
+**Examples**
+
+Query:
+
+``` sql
+SELECT tupleHammingDistance((1, 2, 3), (3, 2, 1)) AS HammingDistance;
+```
+
+Result:
+
+``` text
+┌─HammingDistance─┐
+│ 2 │
+└─────────────────┘
+```
+
+Can be used with [MinHash](../../sql-reference/functions/hash-functions.md#ngramminhash) functions for detection of semi-duplicate strings:
+
+``` sql
+SELECT tupleHammingDistance(wordShingleMinHash(string), wordShingleMinHashCaseInsensitive(string)) as HammingDistance FROM (SELECT 'ClickHouse is a column-oriented database management system for online analytical processing of queries.' AS string);
+```
+
+Result:
+
+``` text
+┌─HammingDistance─┐
+│ 2 │
+└─────────────────┘
+```
diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md
index 8544356f895..661469e6901 100644
--- a/docs/en/sql-reference/functions/type-conversion-functions.md
+++ b/docs/en/sql-reference/functions/type-conversion-functions.md
@@ -373,7 +373,7 @@ This function accepts a number or date or date with time, and returns a FixedStr

## reinterpretAsUUID {#reinterpretasuuid}

-This function accepts 16 bytes string, and returns UUID containing bytes representing the corresponding value in network byte order (big-endian). If the string isn't long enough, the functions work as if the string is padded with the necessary number of null bytes to the end. If the string longer than 16 bytes, the extra bytes at the end are ignored.
+Accepts a 16-byte string and returns a UUID containing bytes representing the corresponding value in network byte order (big-endian). If the string isn't long enough, the function works as if the string is padded with the necessary number of null bytes to the end. If the string is longer than 16 bytes, the extra bytes at the end are ignored.

**Syntax**

@@ -385,8 +385,6 @@ reinterpretAsUUID(fixed_string)

- `fixed_string` — Big-endian byte string. [FixedString](../../sql-reference/data-types/fixedstring.md#fixedstring).

-## reinterpret(x, T) {#type_conversion_function-reinterpret}
-
**Returned value**

- The UUID type value. [UUID](../../sql-reference/data-types/uuid.md#uuid-data-type).

@@ -398,9 +396,7 @@ String to UUID.

Query:

``` sql
-SELECT reinterpret(toInt8(-1), 'UInt8') as int_to_uint,
-    reinterpret(toInt8(1), 'Float32') as int_to_float,
-    reinterpret('1', 'UInt32') as string_to_int;
+SELECT reinterpretAsUUID(reverse(unhex('000102030405060708090a0b0c0d0e0f')));
```

Result:

@@ -431,15 +427,84 @@ Result:
└─────────────────────┘
```

+## reinterpret(x, T) {#type_conversion_function-reinterpret}
+
+Uses the same source in-memory byte sequence for the `x` value and reinterprets it as the destination type.
+
+**Syntax**
+
+``` sql
+reinterpret(x, type)
+```
+
+**Arguments**
+
+- `x` — A value of any type.
+- `type` — Destination type. [String](../../sql-reference/data-types/string.md).
+
+**Returned value**
+
+- Destination type value.
+
+**Examples**
+
+Query:
+```sql
+SELECT reinterpret(toInt8(-1), 'UInt8') as int_to_uint,
+    reinterpret(toInt8(1), 'Float32') as int_to_float,
+    reinterpret('1', 'UInt32') as string_to_int;
+```
+
+Result:
+
+```
+┌─int_to_uint─┬─int_to_float─┬─string_to_int─┐
+│ 255 │ 1e-45 │ 49 │
+└─────────────┴──────────────┴───────────────┘
+```
+
## CAST(x, T) {#type_conversion_function-cast}

-Converts input value `x` to the `T` data type.
+Converts input value `x` to the `T` data type. Unlike the `reinterpret` function, type conversion is performed in a natural way.

The syntax `CAST(x AS t)` is also supported.
-Note, that if value `x` does not fit the bounds of type T, the function overflows. For example, CAST(-1, 'UInt8') returns 255.
+!!! note "Note"
+    If value `x` does not fit the bounds of type `T`, the function overflows. For example, `CAST(-1, 'UInt8')` returns `255`.

-**Example**
+**Syntax**
+
+``` sql
+CAST(x, T)
+```
+
+**Arguments**
+
+- `x` — A value of any type.
+- `T` — Destination type. [String](../../sql-reference/data-types/string.md).
+
+**Returned value**
+
+- Destination type value.
+
+**Examples**
+
+Query:
+
+```sql
+SELECT
+    CAST(toInt8(-1), 'UInt8') AS cast_int_to_uint,
+    CAST(toInt8(1), 'Float32') AS cast_int_to_float,
+    CAST('1', 'UInt32') AS cast_string_to_int;
+```
+
+Result:
+
+```
+┌─cast_int_to_uint─┬─cast_int_to_float─┬─cast_string_to_int─┐
+│ 255 │ 1 │ 1 │
+└──────────────────┴───────────────────┴────────────────────┘
+```

Query:

@@ -460,7 +525,7 @@ Result:
└─────────────────────┴─────────────────────┴────────────┴─────────────────────┴───────────────────────────┘
```

-Conversion to FixedString(N) only works for arguments of type String or FixedString(N).
+Conversion to FixedString(N) only works for arguments of type [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).

Type conversion to [Nullable](../../sql-reference/data-types/nullable.md) and back is supported.

@@ -634,6 +699,7 @@ Result:
```

## parseDateTimeBestEffort {#parsedatetimebesteffort}
+## parseDateTime32BestEffort {#parsedatetime32besteffort}

Converts a date and time in the [String](../../sql-reference/data-types/string.md) representation to [DateTime](../../sql-reference/data-types/datetime.md#data_type-datetime) data type.

@@ -822,10 +888,12 @@ Result:
```

## parseDateTimeBestEffortOrNull {#parsedatetimebesteffortornull}
+## parseDateTime32BestEffortOrNull {#parsedatetime32besteffortornull}

-Same as for [parseDateTimeBestEffort](#parsedatetimebesteffort) except that it returns null when it encounters a date format that cannot be processed.
+Same as for [parseDateTimeBestEffort](#parsedatetimebesteffort) except that it returns `NULL` when it encounters a date format that cannot be processed.

## parseDateTimeBestEffortOrZero {#parsedatetimebesteffortorzero}
+## parseDateTime32BestEffortOrZero {#parsedatetime32besteffortorzero}

Same as for [parseDateTimeBestEffort](#parsedatetimebesteffort) except that it returns zero date or zero date time when it encounters a date format that cannot be processed.

@@ -1001,6 +1069,61 @@ Result:
└─────────────────────────────────┘
```

+## parseDateTime64BestEffort {#parsedatetime64besteffort}
+
+Same as the [parseDateTimeBestEffort](#parsedatetimebesteffort) function but it also parses milliseconds and microseconds and returns the [DateTime64](../../sql-reference/data-types/datetime64.md) data type.
+
+**Syntax**
+
+``` sql
+parseDateTime64BestEffort(time_string [, precision [, time_zone]])
+```
+
+**Parameters**
+
+- `time_string` — String containing a date or date with time to convert. [String](../../sql-reference/data-types/string.md).
+- `precision` — Required precision. `3` — for milliseconds, `6` — for microseconds. Default — `3`. Optional. [UInt8](../../sql-reference/data-types/int-uint.md).
+- `time_zone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). The function parses `time_string` according to the timezone. Optional. [String](../../sql-reference/data-types/string.md).
+
+**Returned value**
+
+- `time_string` converted to the [DateTime64](../../sql-reference/data-types/datetime64.md) data type.
+
+**Examples**
+
+Query:
+
+```sql
+SELECT parseDateTime64BestEffort('2021-01-01') AS a, toTypeName(a) AS t
+UNION ALL
+SELECT parseDateTime64BestEffort('2021-01-01 01:01:00.12346') AS a, toTypeName(a) AS t
+UNION ALL
+SELECT parseDateTime64BestEffort('2021-01-01 01:01:00.12346',6) AS a, toTypeName(a) AS t
+UNION ALL
+SELECT parseDateTime64BestEffort('2021-01-01 01:01:00.12346',3,'Europe/Moscow') AS a, toTypeName(a) AS t
+FORMAT PrettyCompactMonoBlock;
+```
+
+Result:
+
+```
+┌──────────────────────────a─┬─t──────────────────────────────┐
+│ 2021-01-01 01:01:00.123000 │ DateTime64(3) │
+│ 2021-01-01 00:00:00.000000 │ DateTime64(3) │
+│ 2021-01-01 01:01:00.123460 │ DateTime64(6) │
+│ 2020-12-31 22:01:00.123000 │ DateTime64(3, 'Europe/Moscow') │
+└────────────────────────────┴────────────────────────────────┘
+```
+
+## parseDateTime64BestEffortOrNull {#parsedatetime64besteffortornull}
+
+Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort) except that it returns `NULL` when it encounters a date format that cannot be processed.
+
+## parseDateTime64BestEffortOrZero {#parsedatetime64besteffortorzero}
+
+Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort) except that it returns zero date or zero date time when it encounters a date format that cannot be processed.
+
+
## toLowCardinality {#tolowcardinality}

Converts input parameter to the [LowCardianlity](../../sql-reference/data-types/lowcardinality.md) version of same data type.

To convert data from the `LowCardinality` data type use the [CAST](#type_conversion_function-cast) function. For example, `CAST(x as String)`.

**Syntax**

-``` sql
+```sql
toLowCardinality(expr)
```

@@ -1027,7 +1150,7 @@ Type: `LowCardinality(expr_result_type)`

Query:

-``` sql
+```sql
SELECT toLowCardinality('1');
```

@@ -1045,11 +1168,14 @@ Result:

## toUnixTimestamp64Nano {#tounixtimestamp64nano}

-Converts a `DateTime64` to a `Int64` value with fixed sub-second precision. Input value is scaled up or down appropriately depending on it precision. Please note that output value is a timestamp in UTC, not in timezone of `DateTime64`.
+Converts a `DateTime64` to an `Int64` value with fixed sub-second precision. Input value is scaled up or down appropriately depending on its precision.
+
+!!! info "Note"
+    The output value is a timestamp in UTC, not in the timezone of `DateTime64`.

**Syntax**

-``` sql
+```sql
toUnixTimestamp64Milli(value)
```

@@ -1065,7 +1191,7 @@ toUnixTimestamp64Milli(value)

**Example**

Query:

-``` sql
+```sql
WITH toDateTime64('2019-09-16 19:20:12.345678910', 6) AS dt64
SELECT toUnixTimestamp64Milli(dt64);
```

@@ -1078,6 +1204,8 @@ Result:
└──────────────────────────────┘
```

+Query:
+
``` sql
WITH toDateTime64('2019-09-16 19:20:12.345678910', 6) AS dt64
SELECT toUnixTimestamp64Nano(dt64);
@@ -1209,4 +1337,3 @@ Result:
│ 2,"good" │
└───────────────────────────────────────────┘
```
-
diff --git a/docs/en/sql-reference/functions/url-functions.md b/docs/en/sql-reference/functions/url-functions.md
index 9feb7a3c711..397ae45ec71 100644
--- a/docs/en/sql-reference/functions/url-functions.md
+++ b/docs/en/sql-reference/functions/url-functions.md
@@ -5,7 +5,7 @@ toc_title: URLs

# Functions for Working with URLs {#functions-for-working-with-urls}

-All these functions don’t follow the RFC. They are maximally simplified for improved performance.
+All these functions do not follow the RFC. They are maximally simplified for improved performance.
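+
+For instance, even a non-standard scheme is handled by the simplified parser (a hedged illustration; the `domain` function is described further down this page):
+
+``` sql
+SELECT domain('svn+ssh://some.svn-hosting.com:80/repo/trunk');
+```
+
+``` text
+some.svn-hosting.com
+```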
## Functions that Extract Parts of a URL {#functions-that-extract-parts-of-a-url} @@ -398,7 +398,7 @@ Result: ## Functions that Remove Part of a URL {#functions-that-remove-part-of-a-url} -If the URL doesn’t have anything similar, the URL remains unchanged. +If the URL does not have anything similar, the URL remains unchanged. ### cutWWW {#cutwww} diff --git a/docs/en/sql-reference/functions/ym-dict-functions.md b/docs/en/sql-reference/functions/ym-dict-functions.md index 941f75ff006..f947c81c7a9 100644 --- a/docs/en/sql-reference/functions/ym-dict-functions.md +++ b/docs/en/sql-reference/functions/ym-dict-functions.md @@ -136,7 +136,7 @@ In the Yandex geobase, the population might be recorded for child regions, but n ### regionIn(lhs, rhs\[, geobase\]) {#regioninlhs-rhs-geobase} -Checks whether a ‘lhs’ region belongs to a ‘rhs’ region. Returns a UInt8 number equal to 1 if it belongs, or 0 if it doesn’t belong. +Checks whether a ‘lhs’ region belongs to a ‘rhs’ region. Returns a UInt8 number equal to 1 if it belongs, or 0 if it does not belong. The relationship is reflexive – any region also belongs to itself. ### regionHierarchy(id\[, geobase\]) {#regionhierarchyid-geobase} @@ -146,7 +146,7 @@ Example: `regionHierarchy(toUInt32(213)) = [213,1,3,225,10001,10000]`. ### regionToName(id\[, lang\]) {#regiontonameid-lang} -Accepts a UInt32 number – the region ID from the Yandex geobase. A string with the name of the language can be passed as a second argument. Supported languages are: ru, en, ua, uk, by, kz, tr. If the second argument is omitted, the language ‘ru’ is used. If the language is not supported, an exception is thrown. Returns a string – the name of the region in the corresponding language. If the region with the specified ID doesn’t exist, an empty string is returned. +Accepts a UInt32 number – the region ID from the Yandex geobase. A string with the name of the language can be passed as a second argument. Supported languages are: ru, en, ua, uk, by, kz, tr. If the second argument is omitted, the language ‘ru’ is used. If the language is not supported, an exception is thrown. Returns a string – the name of the region in the corresponding language. If the region with the specified ID does not exist, an empty string is returned. `ua` and `uk` both mean Ukrainian. diff --git a/docs/en/sql-reference/operators/in.md b/docs/en/sql-reference/operators/in.md index 0abeabc7f57..3d8d2673468 100644 --- a/docs/en/sql-reference/operators/in.md +++ b/docs/en/sql-reference/operators/in.md @@ -208,10 +208,10 @@ and the temporary table `_data1` will be sent to every remote server with the qu This is more optimal than using the normal IN. However, keep the following points in mind: -1. When creating a temporary table, data is not made unique. To reduce the volume of data transmitted over the network, specify DISTINCT in the subquery. (You don’t need to do this for a normal IN.) +1. When creating a temporary table, data is not made unique. To reduce the volume of data transmitted over the network, specify DISTINCT in the subquery. (You do not need to do this for a normal IN.) 2. The temporary table will be sent to all the remote servers. Transmission does not account for network topology. For example, if 10 remote servers reside in a datacenter that is very remote in relation to the requestor server, the data will be sent 10 times over the channel to the remote datacenter. Try to avoid large data sets when using GLOBAL IN. 3. 
When transmitting data to remote servers, restrictions on network bandwidth are not configurable. You might overload the network.
-4. Try to distribute data across servers so that you don’t need to use GLOBAL IN on a regular basis.
+4. Try to distribute data across servers so that you do not need to use GLOBAL IN on a regular basis.
5. If you need to use GLOBAL IN often, plan the location of the ClickHouse cluster so that a single group of replicas resides in no more than one data center with a fast network between them, so that a query can be processed entirely within a single data center.

It also makes sense to specify a local table in the `GLOBAL IN` clause, in case this local table is only available on the requestor server and you want to use data from it on remote servers.

@@ -236,4 +236,4 @@ where M is between 1 and 3 depending on which replica the local query is executi

Therefore adding the max_parallel_replicas setting will only produce correct results if both tables have the same replication scheme and are sampled by UserID or a subkey of it. In particular, if local_table_2 does not have a sampling key, incorrect results will be produced. The same rule applies to JOIN.

-One workaround if local_table_2 doesn't meet the requirements, is to use `GLOBAL IN` or `GLOBAL JOIN`.
+One workaround if local_table_2 does not meet the requirements is to use `GLOBAL IN` or `GLOBAL JOIN`.
diff --git a/docs/en/sql-reference/operators/index.md b/docs/en/sql-reference/operators/index.md
index e073d5f23f0..31fce7f72b3 100644
--- a/docs/en/sql-reference/operators/index.md
+++ b/docs/en/sql-reference/operators/index.md
@@ -250,7 +250,7 @@ The following operators do not have a priority since they are brackets:

## Associativity {#associativity}

All binary operators have left associativity. For example, `1 + 2 + 3` is transformed to `plus(plus(1, 2), 3)`.
-Sometimes this doesn’t work the way you expect. For example, `SELECT 4 > 2 > 3` will result in 0.
+Sometimes this does not work the way you expect. For example, `SELECT 4 > 2 > 3` will result in 0.

For efficiency, the `and` and `or` functions accept any number of arguments. The corresponding chains of `AND` and `OR` operators are transformed into a single call of these functions.

diff --git a/docs/en/sql-reference/statements/alter/column.md b/docs/en/sql-reference/statements/alter/column.md
index 3ece30be5b8..2e7cd1be952 100644
--- a/docs/en/sql-reference/statements/alter/column.md
+++ b/docs/en/sql-reference/statements/alter/column.md
@@ -39,7 +39,7 @@ Adds a new column to the table with the specified `name`, `type`, [`codec`](../.

If the `IF NOT EXISTS` clause is included, the query won’t return an error if the column already exists. If you specify `AFTER name_after` (the name of another column), the column is added after the specified one in the list of table columns. If you want to add a column to the beginning of the table use the `FIRST` clause. Otherwise, the column is added to the end of the table. For a chain of actions, `name_after` can be the name of a column that is added in one of the previous actions.

-Adding a column just changes the table structure, without performing any actions with data. The data doesn’t appear on the disk after `ALTER`. If the data is missing for a column when reading from the table, it is filled in with default values (by performing the default expression if there is one, or using zeros or empty strings).
The column appears on the disk after merging data parts (see [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md)).
+Adding a column just changes the table structure, without performing any actions with data. The data does not appear on the disk after `ALTER`. If the data is missing for a column when reading from the table, it is filled in with default values (by performing the default expression if there is one, or using zeros or empty strings). The column appears on the disk after merging data parts (see [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md)).

This approach allows us to complete the `ALTER` query instantly, without increasing the volume of old data.

@@ -70,10 +70,13 @@ Added3 UInt32

DROP COLUMN [IF EXISTS] name
```

-Deletes the column with the name `name`. If the `IF EXISTS` clause is specified, the query won’t return an error if the column doesn’t exist.
+Deletes the column with the name `name`. If the `IF EXISTS` clause is specified, the query won’t return an error if the column does not exist.

Deletes data from the file system. Since this deletes entire files, the query is completed almost instantly.

+!!! warning "Warning"
+    You can’t delete a column if it is referenced by a [materialized view](../../../sql-reference/statements/create/view.md#materialized); in that case the query returns an error.
+
Example:

``` sql
@@ -86,7 +89,7 @@ ALTER TABLE visits DROP COLUMN browser

RENAME COLUMN [IF EXISTS] name to new_name
```

-Renames the column `name` to `new_name`. If the `IF EXISTS` clause is specified, the query won’t return an error if the column doesn’t exist. Since renaming does not involve the underlying data, the query is completed almost instantly.
+Renames the column `name` to `new_name`. If the `IF EXISTS` clause is specified, the query won’t return an error if the column does not exist. Since renaming does not involve the underlying data, the query is completed almost instantly.

**NOTE**: Columns specified in the key expression of the table (either with `ORDER BY` or `PRIMARY KEY`) cannot be renamed. Trying to change these columns will produce `SQL Error [524]`.

@@ -104,7 +107,7 @@ CLEAR COLUMN [IF EXISTS] name IN PARTITION partition_name

Resets all data in a column for a specified partition. Read more about setting the partition name in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).

-If the `IF EXISTS` clause is specified, the query won’t return an error if the column doesn’t exist.
+If the `IF EXISTS` clause is specified, the query won’t return an error if the column does not exist.

Example:

``` sql
ALTER TABLE visits CLEAR COLUMN browser IN PARTITION tuple()

@@ -118,7 +121,7 @@ ALTER TABLE visits CLEAR COLUMN browser IN PARTITION tuple()

COMMENT COLUMN [IF EXISTS] name 'comment'
```

-Adds a comment to the column. If the `IF EXISTS` clause is specified, the query won’t return an error if the column doesn’t exist.
+Adds a comment to the column. If the `IF EXISTS` clause is specified, the query won’t return an error if the column does not exist.

Each column can have one comment. If a comment already exists for the column, a new comment overwrites the previous comment.

@@ -146,11 +149,11 @@ This query changes the `name` column properties:

For examples of columns TTL modifying, see [Column TTL](../../../engines/table-engines/mergetree-family/mergetree.md#mergetree-column-ttl).

-If the `IF EXISTS` clause is specified, the query won’t return an error if the column doesn’t exist.
+If the `IF EXISTS` clause is specified, the query won’t return an error if the column does not exist. The query also can change the order of the columns using `FIRST | AFTER` clause, see [ADD COLUMN](#alter_add-column) description. -When changing the type, values are converted as if the [toType](../../../sql-reference/functions/type-conversion-functions.md) functions were applied to them. If only the default expression is changed, the query doesn’t do anything complex, and is completed almost instantly. +When changing the type, values are converted as if the [toType](../../../sql-reference/functions/type-conversion-functions.md) functions were applied to them. If only the default expression is changed, the query does not do anything complex, and is completed almost instantly. Example: @@ -180,7 +183,7 @@ ALTER TABLE table_name MODIFY column_name REMOVE property; ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL; ``` -## See Also +**See Also** - [REMOVE TTL](ttl.md). @@ -210,4 +213,4 @@ If the `ALTER` query is not sufficient to make the table changes you need, you c The `ALTER` query blocks all reads and writes for the table. In other words, if a long `SELECT` is running at the time of the `ALTER` query, the `ALTER` query will wait for it to complete. At the same time, all new queries to the same table will wait while this `ALTER` is running. -For tables that don’t store data themselves (such as `Merge` and `Distributed`), `ALTER` just changes the table structure, and does not change the structure of subordinate tables. For example, when running ALTER for a `Distributed` table, you will also need to run `ALTER` for the tables on all remote servers. +For tables that do not store data themselves (such as `Merge` and `Distributed`), `ALTER` just changes the table structure, and does not change the structure of subordinate tables. For example, when running ALTER for a `Distributed` table, you will also need to run `ALTER` for the tables on all remote servers. diff --git a/docs/en/sql-reference/statements/alter/partition.md b/docs/en/sql-reference/statements/alter/partition.md index 42396223b86..b38643d6027 100644 --- a/docs/en/sql-reference/statements/alter/partition.md +++ b/docs/en/sql-reference/statements/alter/partition.md @@ -16,7 +16,7 @@ The following operations with [partitions](../../../engines/table-engines/merget - [CLEAR COLUMN IN PARTITION](#alter_clear-column-partition) — Resets the value of a specified column in a partition. - [CLEAR INDEX IN PARTITION](#alter_clear-index-partition) — Resets the specified secondary index in a partition. - [FREEZE PARTITION](#alter_freeze-partition) — Creates a backup of a partition. -- [FETCH PARTITION](#alter_fetch-partition) — Downloads a partition from another server. +- [FETCH PARTITION\|PART](#alter_fetch-partition) — Downloads a part or partition from another server. - [MOVE PARTITION\|PART](#alter_move-partition) — Move partition/data part to another disk or volume. @@ -40,7 +40,7 @@ Read about setting the partition expression in a section [How to specify the par After the query is executed, you can do whatever you want with the data in the `detached` directory — delete it from the file system, or just leave it. -This query is replicated – it moves the data to the `detached` directory on all replicas. Note that you can execute this query only on a leader replica. 
To find out if a replica is a leader, perform the `SELECT` query to the [system.replicas](../../../operations/system-tables/replicas.md#system_tables-replicas) table. Alternatively, it is easier to make a `DETACH` query on all replicas - all the replicas throw an exception, except the leader replica. +This query is replicated – it moves the data to the `detached` directory on all replicas. Note that you can execute this query only on a leader replica. To find out if a replica is a leader, perform the `SELECT` query to the [system.replicas](../../../operations/system-tables/replicas.md#system_tables-replicas) table. Alternatively, it is easier to make a `DETACH` query on all replicas - all the replicas throw an exception, except the leader replicas (as multiple leaders are allowed). ## DROP PARTITION\|PART {#alter_drop-partition} @@ -85,9 +85,13 @@ ALTER TABLE visits ATTACH PART 201901_2_2_0; Read more about setting the partition expression in a section [How to specify the partition expression](#alter-how-to-specify-part-expr). -This query is replicated. The replica-initiator checks whether there is data in the `detached` directory. If data exists, the query checks its integrity. If everything is correct, the query adds the data to the table. All other replicas download the data from the replica-initiator. +This query is replicated. The replica-initiator checks whether there is data in the `detached` directory. +If data exists, the query checks its integrity. If everything is correct, the query adds the data to the table. -So you can put data to the `detached` directory on one replica, and use the `ALTER ... ATTACH` query to add it to the table on all replicas. +If the non-initiator replica, receiving the attach command, finds the part with the correct checksums in its own `detached` folder, it attaches the data without fetching it from other replicas. +If there is no part with the correct checksums, the data is downloaded from any replica having the part. + +You can put data to the `detached` directory on one replica and use the `ALTER ... ATTACH` query to add it to the table on all replicas. ## ATTACH PARTITION FROM {#alter_attach-partition-from} @@ -95,7 +99,8 @@ So you can put data to the `detached` directory on one replica, and use the `ALT ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1 ``` -This query copies the data partition from the `table1` to `table2` adds data to exsisting in the `table2`. Note that data won’t be deleted from `table1`. +This query copies the data partition from `table1` to `table2`. +Note that data will be deleted neither from `table1` nor from `table2`. For the query to run successfully, the following conditions must be met: @@ -179,7 +184,7 @@ To restore data from a backup, do the following: 2. Copy the data from the `data/database/table/` directory inside the backup to the `/var/lib/clickhouse/data/database/table/detached/` directory. 3. Run `ALTER TABLE t ATTACH PARTITION` queries to add the data to a table. -Restoring from a backup doesn’t require stopping the server. +Restoring from a backup does not require stopping the server. For more information about backups and restoring data, see the [Data Backup](../../../operations/backup.md) section. @@ -191,29 +196,35 @@ ALTER TABLE table_name CLEAR INDEX index_name IN PARTITION partition_expr The query works similar to `CLEAR COLUMN`, but it resets an index instead of a column data. 
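+For example, using the `CLEAR INDEX` syntax shown above with a hypothetical secondary index named `idx_browser` on the `visits` table (the index name is illustrative, not taken from the documentation above):
+
+``` sql
+-- Illustrative sketch: resets the idx_browser skip index in one partition.
+ALTER TABLE visits CLEAR INDEX idx_browser IN PARTITION 201902;
+```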
-## FETCH PARTITION {#alter_fetch-partition} +## FETCH PARTITION|PART {#alter_fetch-partition} ``` sql -ALTER TABLE table_name FETCH PARTITION partition_expr FROM 'path-in-zookeeper' +ALTER TABLE table_name FETCH PARTITION|PART partition_expr FROM 'path-in-zookeeper' ``` Downloads a partition from another server. This query only works for the replicated tables. The query does the following: -1. Downloads the partition from the specified shard. In ‘path-in-zookeeper’ you must specify a path to the shard in ZooKeeper. +1. Downloads the partition|part from the specified shard. In ‘path-in-zookeeper’ you must specify a path to the shard in ZooKeeper. 2. Then the query puts the downloaded data to the `detached` directory of the `table_name` table. Use the [ATTACH PARTITION\|PART](#alter_attach-partition) query to add the data to the table. For example: +1. FETCH PARTITION ``` sql ALTER TABLE users FETCH PARTITION 201902 FROM '/clickhouse/tables/01-01/visits'; ALTER TABLE users ATTACH PARTITION 201902; ``` +2. FETCH PART +``` sql +ALTER TABLE users FETCH PART 201901_2_2_0 FROM '/clickhouse/tables/01-01/visits'; +ALTER TABLE users ATTACH PART 201901_2_2_0; +``` Note that: -- The `ALTER ... FETCH PARTITION` query isn’t replicated. It places the partition to the `detached` directory only on the local server. +- The `ALTER ... FETCH PARTITION|PART` query isn’t replicated. It places the part or partition to the `detached` directory only on the local server. - The `ALTER TABLE ... ATTACH` query is replicated. It adds the data to all replicas. The data is added to one of the replicas from the `detached` directory, and to the others - from neighboring replicas. Before downloading, the system checks if the partition exists and the table structure matches. The most appropriate replica is selected automatically from the healthy replicas. diff --git a/docs/en/sql-reference/statements/alter/ttl.md b/docs/en/sql-reference/statements/alter/ttl.md index aa7ee838e10..9cd63d3b8fe 100644 --- a/docs/en/sql-reference/statements/alter/ttl.md +++ b/docs/en/sql-reference/statements/alter/ttl.md @@ -79,7 +79,7 @@ The `TTL` is no longer there, so the second row is not deleted: └───────────────────────┴─────────┴──────────────┘ ``` -### See Also +**See Also** - More about the [TTL-expression](../../../sql-reference/statements/create/table.md#ttl-expression). - Modify column [with TTL](../../../sql-reference/statements/alter/column.md#alter_modify-column). diff --git a/docs/en/sql-reference/statements/alter/user.md b/docs/en/sql-reference/statements/alter/user.md index b590bf4887d..73081bc8619 100644 --- a/docs/en/sql-reference/statements/alter/user.md +++ b/docs/en/sql-reference/statements/alter/user.md @@ -15,11 +15,23 @@ ALTER USER [IF EXISTS] name1 [ON CLUSTER cluster_name1] [RENAME TO new_name1] [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']}] [[ADD | DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ] + [GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]] [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY | WRITABLE] | PROFILE 'profile_name'] [,...] 
```

To use `ALTER USER` you must have the [ALTER USER](../../../sql-reference/statements/grant.md#grant-access-management) privilege.

+## GRANTEES Clause {#grantees}
+
+Specifies users or roles which are allowed to receive [privileges](../../../sql-reference/statements/grant.md#grant-privileges) from this user on the condition this user has also all required access granted with [GRANT OPTION](../../../sql-reference/statements/grant.md#grant-privigele-syntax). Options of the `GRANTEES` clause:
+
+- `user` — Specifies a user this user can grant privileges to.
+- `role` — Specifies a role this user can grant privileges to.
+- `ANY` — This user can grant privileges to anyone. It's the default setting.
+- `NONE` — This user can grant privileges to none.
+
+You can exclude any user or role by using the `EXCEPT` expression. For example, `ALTER USER user1 GRANTEES ANY EXCEPT user2`. It means if `user1` has some privileges granted with `GRANT OPTION` it will be able to grant those privileges to anyone except `user2`.
+
## Examples {#alter-user-examples}

Set assigned roles as default:

@@ -43,3 +55,9 @@ Set all the assigned roles to default, excepting `role1` and `role2`:
``` sql
ALTER USER user DEFAULT ROLE ALL EXCEPT role1, role2
```
+
+Allows the user with `john` account to grant his privileges to the user with `jack` account:
+
+``` sql
+ALTER USER john GRANTEES jack;
+```
diff --git a/docs/en/sql-reference/statements/attach.md b/docs/en/sql-reference/statements/attach.md
index ffb577a8839..01783e9cb2f 100644
--- a/docs/en/sql-reference/statements/attach.md
+++ b/docs/en/sql-reference/statements/attach.md
@@ -5,13 +5,14 @@ toc_title: ATTACH

# ATTACH Statement {#attach}

-This query is exactly the same as [CREATE](../../sql-reference/statements/create/table.md), but
+Attaches the table, for example, when moving a database to another server.

-- Instead of the word `CREATE` it uses the word `ATTACH`.
-- The query does not create data on the disk, but assumes that data is already in the appropriate places, and just adds information about the table to the server.
-  After executing an ATTACH query, the server will know about the existence of the table.
+The query does not create data on the disk, but assumes that data is already in the appropriate places, and just adds information about the table to the server. After executing an `ATTACH` query, the server will know about the existence of the table.

-If the table was previously detached ([DETACH](../../sql-reference/statements/detach.md)), meaning that its structure is known, you can use shorthand without defining the structure.
+If the table was previously detached using the [DETACH](../../sql-reference/statements/detach.md) query, meaning that its structure is known, you can use shorthand without defining the structure.
+
+## Syntax Forms {#syntax-forms}
+### Attach Existing Table {#attach-existing-table}

``` sql
ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster]
@@ -21,4 +22,38 @@ This query is used when starting the server. The server stores table metadata as

If the table was detached permanently, it won't be reattached at the server start, so you need to use `ATTACH` query explicitly.

-[Original article](https://clickhouse.tech/docs/en/sql-reference/statements/attach/)
+### Create New Table And Attach Data {#create-new-table-and-attach-data}
+
+**With specified path to table data**
+
+```sql
+ATTACH TABLE name FROM 'path/to/data/' (col1 Type1, ...)
+```
+
+It creates a new table with the provided structure and attaches the table data from the provided directory in `user_files`.
+
+**Example**
+
+Query:
+
+```sql
+DROP TABLE IF EXISTS test;
+INSERT INTO TABLE FUNCTION file('01188_attach/test/data.TSV', 'TSV', 's String, n UInt8') VALUES ('test', 42);
+ATTACH TABLE test FROM '01188_attach/test' (s String, n UInt8) ENGINE = File(TSV);
+SELECT * FROM test;
+```
+Result:
+
+```text
+┌─s────┬──n─┐
+│ test │ 42 │
+└──────┴────┘
+```
+
+**With specified table UUID** (Only for `Atomic` database)
+
+```sql
+ATTACH TABLE name UUID '<uuid>' (col1 Type1, ...)
+```
+
+It creates a new table with the provided structure and attaches data from the table with the specified UUID.
\ No newline at end of file
diff --git a/docs/en/sql-reference/statements/check-table.md b/docs/en/sql-reference/statements/check-table.md
index 450447acaf8..d40fe263b1a 100644
--- a/docs/en/sql-reference/statements/check-table.md
+++ b/docs/en/sql-reference/statements/check-table.md
@@ -28,11 +28,38 @@ The `CHECK TABLE` query supports the following table engines:

Performed over the tables with other table engines causes an exception.

-Engines from the `*Log` family don’t provide automatic data recovery on failure. Use the `CHECK TABLE` query to track data loss in a timely manner.
+Engines from the `*Log` family do not provide automatic data recovery on failure. Use the `CHECK TABLE` query to track data loss in a timely manner.

-For `MergeTree` family engines, the `CHECK TABLE` query shows a check status for every individual data part of a table on the local server.
+## Checking the MergeTree Family Tables {#checking-mergetree-tables}

-**If the data is corrupted**
+For `MergeTree` family engines, if [check_query_single_value_result](../../operations/settings/settings.md#check_query_single_value_result) = 0, the `CHECK TABLE` query shows a check status for every individual data part of a table on the local server.
+
+```sql
+SET check_query_single_value_result = 0;
+CHECK TABLE test_table;
+```
+
+```text
+┌─part_path─┬─is_passed─┬─message─┐
+│ all_1_4_1 │         1 │         │
+│ all_1_4_2 │         1 │         │
+└───────────┴───────────┴─────────┘
+```
+
+If `check_query_single_value_result` = 1, the `CHECK TABLE` query shows the general table check status.
+
+```sql
+SET check_query_single_value_result = 1;
+CHECK TABLE test_table;
+```
+
+```text
+┌─result─┐
+│      1 │
+└────────┘
+```
+
+## If the Data Is Corrupted {#if-data-is-corrupted}

If the table is corrupted, you can copy the non-corrupted data to another table. To do this:
diff --git a/docs/en/sql-reference/statements/create/database.md b/docs/en/sql-reference/statements/create/database.md
index bdb31d44b0b..3c6f73d54db 100644
--- a/docs/en/sql-reference/statements/create/database.md
+++ b/docs/en/sql-reference/statements/create/database.md
@@ -15,7 +15,7 @@ CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] [ENGINE = engine(..

### IF NOT EXISTS {#if-not-exists}

-If the `db_name` database already exists, then ClickHouse doesn’t create a new database and:
+If the `db_name` database already exists, then ClickHouse does not create a new database and:

- Doesn’t throw an exception if clause is specified.
- Throws an exception if clause isn’t specified.
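+For example, with a hypothetical database name (an illustrative assumption), the second statement below is a no-op instead of an error:
+
+``` sql
+CREATE DATABASE IF NOT EXISTS db_demo;
+CREATE DATABASE IF NOT EXISTS db_demo; -- does nothing, no exception is thrown
+```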
diff --git a/docs/en/sql-reference/statements/create/row-policy.md b/docs/en/sql-reference/statements/create/row-policy.md
index cbe639c6fc5..1df7cc36995 100644
--- a/docs/en/sql-reference/statements/create/row-policy.md
+++ b/docs/en/sql-reference/statements/create/row-policy.md
@@ -5,39 +5,84 @@ toc_title: ROW POLICY

# CREATE ROW POLICY {#create-row-policy-statement}

-Creates [filters for rows](../../../operations/access-rights.md#row-policy-management), which a user can read from a table.
+Creates a [row policy](../../../operations/access-rights.md#row-policy-management), i.e. a filter used to determine which rows a user can read from a table.
+
+!!! warning "Warning"
+    Row policies make sense only for users with read-only access. If a user can modify a table or copy partitions between tables, it defeats the restrictions of row policies.

Syntax:

``` sql
CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluster_name1] ON [db1.]table1
        [, policy_name2 [ON CLUSTER cluster_name2] ON [db2.]table2 ...]
+    [FOR SELECT] USING condition
    [AS {PERMISSIVE | RESTRICTIVE}]
-    [FOR SELECT]
-    [USING condition]
    [TO {role1 [, role2 ...] | ALL | ALL EXCEPT role1 [, role2 ...]}]
```

-`ON CLUSTER` clause allows creating row policies on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md).
+## USING Clause {#create-row-policy-using}

-## AS Clause {#create-row-policy-as}
-
-Using this section you can create permissive or restrictive policies.
-
-Permissive policy grants access to rows. Permissive policies which apply to the same table are combined together using the boolean `OR` operator. Policies are permissive by default.
-
-Restrictive policy restricts access to rows. Restrictive policies which apply to the same table are combined together using the boolean `AND` operator.
-
-Restrictive policies apply to rows that passed the permissive filters. If you set restrictive policies but no permissive policies, the user can’t get any row from the table.
+Allows specifying a condition to filter rows. A user will see a row if the condition evaluates to non-zero for the row.

## TO Clause {#create-row-policy-to}

-In the section `TO` you can provide a mixed list of roles and users, for example, `CREATE ROW POLICY ... TO accountant, john@localhost`.
+In the section `TO` you can provide a list of users and roles this policy should work for. For example, `CREATE ROW POLICY ... TO accountant, john@localhost`.

-Keyword `ALL` means all the ClickHouse users including current user. Keywords `ALL EXCEPT` allow to exclude some users from the all users list, for example, `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost`
+Keyword `ALL` means all the ClickHouse users including current user. The keywords `ALL EXCEPT` allow excluding some users from the all users list, for example, `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost`

-## Examples {#examples}
+!!! note "Note"
+    If there are no row policies defined for a table then any user can `SELECT` all the rows from the table. Defining one or more row policies for the table makes access to the table depend on the row policies, no matter if those row policies are defined for the current user or not. For example, the following policy
+
+    `CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter`

-`CREATE ROW POLICY filter ON mydb.mytable FOR SELECT USING a<1000 TO accountant, john@localhost`
+    forbids the users `mira` and `peter` from seeing the rows with `b != 1`, and any non-mentioned user (e.g., the user `paul`) will see no rows from `mydb.table1` at all.
+
+    If that's not desirable, it can be fixed by adding one more row policy, like the following:

-`CREATE ROW POLICY filter ON mydb.mytable FOR SELECT USING a<1000 TO ALL EXCEPT mira`
+    `CREATE ROW POLICY pol2 ON mydb.table1 USING 1 TO ALL EXCEPT mira, peter`
+
+## AS Clause {#create-row-policy-as}
+
+It's allowed to have more than one policy enabled on the same table for the same user at the same time. So we need a way to combine the conditions from multiple policies.
+
+By default policies are combined using the boolean `OR` operator. For example, the following policies
+
+``` sql
+CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter
+CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 TO peter, antonio
+```
+
+enable the user `peter` to see rows with either `b=1` or `c=2`.
+
+The `AS` clause specifies how policies should be combined with other policies. Policies can be either permissive or restrictive. By default policies are permissive, which means they are combined using the boolean `OR` operator.
+
+A policy can be defined as restrictive as an alternative. Restrictive policies are combined using the boolean `AND` operator.
+
+Here is the general formula:
+
+```
+row_is_visible = (one or more of the permissive policies' conditions are non-zero) AND
+                 (all of the restrictive policies' conditions are non-zero)
+```
+
+For example, the following policies
+
+``` sql
+CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter
+CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 AS RESTRICTIVE TO peter, antonio
+```
+
+enable the user `peter` to see rows only if both `b=1` AND `c=2`.
+
+## ON CLUSTER Clause {#create-row-policy-on-cluster}
+
+Allows creating row policies on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md).
+
+
+## Examples
+
+`CREATE ROW POLICY filter1 ON mydb.mytable USING a<1000 TO accountant, john@localhost`
+
+`CREATE ROW POLICY filter2 ON mydb.mytable USING a<1000 AND b=5 TO ALL EXCEPT mira`
+
+`CREATE ROW POLICY filter3 ON mydb.mytable USING 1 TO admin`
diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md
index b98888f7bfa..70ac9acd186 100644
--- a/docs/en/sql-reference/statements/create/table.md
+++ b/docs/en/sql-reference/statements/create/table.md
@@ -50,15 +50,32 @@ Creates a table with the same result as that of the [table function](../../../sq

### From SELECT query {#from-select-query}

``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name ENGINE = engine AS SELECT ...
+CREATE TABLE [IF NOT EXISTS] [db.]table_name[(name1 [type1], name2 [type2], ...)] ENGINE = engine AS SELECT ...
```

-Creates a table with a structure like the result of the `SELECT` query, with the `engine` engine, and fills it with data from SELECT.
+Creates a table with a structure like the result of the `SELECT` query, with the `engine` engine, and fills it with data from `SELECT`. You can also explicitly specify the columns description.

-In all cases, if `IF NOT EXISTS` is specified, the query won’t return an error if the table already exists. In this case, the query won’t do anything.
+If the table already exists and `IF NOT EXISTS` is specified, the query won’t do anything.

There can be other clauses after the `ENGINE` clause in the query. See detailed documentation on how to create tables in the descriptions of [table engines](../../../engines/table-engines/index.md#table_engines).

+**Example**
+
+Query:
+
+``` sql
+CREATE TABLE t1 (x String) ENGINE = Memory AS SELECT 1;
+SELECT x, toTypeName(x) FROM t1;
+```
+
+Result:
+
+```text
+┌─x─┬─toTypeName(x)─┐
+│ 1 │ String        │
+└───┴───────────────┘
+```
+
## NULL Or NOT NULL Modifiers {#null-modifiers}

`NULL` and `NOT NULL` modifiers after data type in column definition allow or do not allow it to be [Nullable](../../../sql-reference/data-types/nullable.md#data_type-nullable).
@@ -79,13 +96,13 @@ If the default expression is defined, the column type is optional. If there isn

If the data type and default expression are defined explicitly, this expression will be cast to the specified type using type casting functions. Example: `Hits UInt32 DEFAULT 0` means the same thing as `Hits UInt32 DEFAULT toUInt32(0)`.

-Default expressions may be defined as an arbitrary expression from table constants and columns. When creating and changing the table structure, it checks that expressions don’t contain loops. For INSERT, it checks that expressions are resolvable – that all columns they can be calculated from have been passed.
+Default expressions may be defined as an arbitrary expression from table constants and columns. When creating and changing the table structure, it checks that expressions do not contain loops. For INSERT, it checks that expressions are resolvable – that all columns they can be calculated from have been passed.

### DEFAULT {#default}

`DEFAULT expr`

-Normal default value. If the INSERT query doesn’t specify the corresponding column, it will be filled in by computing the corresponding expression.
+Normal default value. If the INSERT query does not specify the corresponding column, it will be filled in by computing the corresponding expression.

### MATERIALIZED {#materialized}

@@ -217,14 +234,14 @@ High compression levels are useful for asymmetric scenarios, like compress once,

### Specialized Codecs {#create-query-specialized-codecs}

-These codecs are designed to make compression more effective by using specific features of data. Some of these codecs don’t compress data themself. Instead, they prepare the data for a common purpose codec, which compresses it better than without this preparation.
+These codecs are designed to make compression more effective by using specific features of data. Some of these codecs do not compress data themselves. Instead, they prepare the data for a common purpose codec, which compresses it better than without this preparation.

Specialized codecs:

- `Delta(delta_bytes)` — Compression approach in which raw values are replaced by the difference of two neighboring values, except for the first value that stays unchanged. Up to `delta_bytes` are used for storing delta values, so `delta_bytes` is the maximum size of raw values. Possible `delta_bytes` values: 1, 2, 4, 8. The default value for `delta_bytes` is `sizeof(type)` if equal to 1, 2, 4, or 8. In all other cases, it’s 1.
- `DoubleDelta` — Calculates delta of deltas and writes it in compact binary form. Optimal compression rates are achieved for monotonic sequences with a constant stride, such as time series data. Can be used with any fixed-width type. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. Uses 1 extra bit for 32-byte deltas: 5-bit prefixes instead of 4-bit prefixes. For additional information, see Compressing Time Stamps in [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).
- `Gorilla` — Calculates XOR between current and previous value and writes it in compact binary form. Efficient when storing a series of floating point values that change slowly, because the best compression rate is achieved when neighboring values are binary equal. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. For additional information, see Compressing Values in [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).
-- `T64` — Compression approach that crops unused high bits of values in integer data types (including `Enum`, `Date` and `DateTime`). At each step of its algorithm, codec takes a block of 64 values, puts them into 64x64 bit matrix, transposes it, crops the unused bits of values and returns the rest as a sequence. Unused bits are the bits, that don’t differ between maximum and minimum values in the whole data part for which the compression is used.
+- `T64` — Compression approach that crops unused high bits of values in integer data types (including `Enum`, `Date` and `DateTime`). At each step of its algorithm, codec takes a block of 64 values, puts them into 64x64 bit matrix, transposes it, crops the unused bits of values and returns the rest as a sequence. Unused bits are the bits that do not differ between maximum and minimum values in the whole data part for which the compression is used.

`DoubleDelta` and `Gorilla` codecs are used in Gorilla TSDB as the components of its compressing algorithm. Gorilla approach is effective in scenarios when there is a sequence of slowly changing values with their timestamps. Timestamps are effectively compressed by the `DoubleDelta` codec, and values are effectively compressed by the `Gorilla` codec. For example, to get an effectively stored table, you can create it in the following configuration:

@@ -270,7 +287,7 @@ It’s possible to use tables with [ENGINE = Memory](../../../engines/table-engi
!!!note "Note"
    This query is supported only for [Atomic](../../../engines/database-engines/atomic.md) database engine.

-If you need to delete some data from a table, you can create a new table and fill it with a `SELECT` statement that doesn't retrieve unwanted data, then drop the old table and rename the new one:
+If you need to delete some data from a table, you can create a new table and fill it with a `SELECT` statement that does not retrieve unwanted data, then drop the old table and rename the new one:

```sql
CREATE TABLE myNewTable AS myOldTable;
@@ -287,7 +304,9 @@ REPLACE TABLE myOldTable SELECT * FROM myOldTable WHERE CounterID <12345;

### Syntax

-{CREATE [OR REPLACE]|REPLACE} TABLE [db.]table_name
+``` sql
+{CREATE [OR REPLACE] | REPLACE} TABLE [db.]table_name
+```

All syntax forms for `CREATE` query also work for this query. `REPLACE` for a non-existent table will cause an error.

@@ -335,3 +354,39 @@ SELECT * FROM base.t1;
│ 3 │
└───┘
```
+
+## COMMENT Clause {#comment-table}
+
+You can add a comment to the table when creating it.
+ +!!!note "Note" + The comment is supported for all table engines except [Kafka](../../../engines/table-engines/integrations/kafka.md), [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md) and [EmbeddedRocksDB](../../../engines/table-engines/integrations/embedded-rocksdb.md). + + +**Syntax** + +``` sql +CREATE TABLE db.table_name +( + name1 type1, name2 type2, ... +) +ENGINE = engine +COMMENT 'Comment' +``` + +**Example** + +Query: + +``` sql +CREATE TABLE t1 (x String) ENGINE = Memory COMMENT 'The temporary table'; +SELECT name, comment FROM system.tables WHERE name = 't1'; +``` + +Result: + +```text +┌─name─┬─comment─────────────┐ +│ t1 │ The temporary table │ +└──────┴─────────────────────┘ +``` diff --git a/docs/en/sql-reference/statements/create/user.md b/docs/en/sql-reference/statements/create/user.md index 49a4e3813a1..ad9f203b768 100644 --- a/docs/en/sql-reference/statements/create/user.md +++ b/docs/en/sql-reference/statements/create/user.md @@ -15,6 +15,7 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1] [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']}] [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] [DEFAULT ROLE role [,...]] + [GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]] [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY | WRITABLE] | PROFILE 'profile_name'] [,...] ``` @@ -51,14 +52,26 @@ Another way of specifying host is to use `@` syntax following the username. Exam - `CREATE USER mira@'192.168.%.%'` — Equivalent to the `HOST LIKE` syntax. !!! info "Warning" - ClickHouse treats `user_name@'address'` as a username as a whole. Thus, technically you can create multiple users with the same `user_name` and different constructions after `@`. However, we don’t recommend to do so. + ClickHouse treats `user_name@'address'` as a username as a whole. Thus, technically you can create multiple users with the same `user_name` and different constructions after `@`. However, we do not recommend to do so. + + +## GRANTEES Clause {#grantees} + +Specifies users or roles which are allowed to receive [privileges](../../../sql-reference/statements/grant.md#grant-privileges) from this user on the condition this user has also all required access granted with [GRANT OPTION](../../../sql-reference/statements/grant.md#grant-privigele-syntax). Options of the `GRANTEES` clause: + +- `user` — Specifies a user this user can grant privileges to. +- `role` — Specifies a role this user can grant privileges to. +- `ANY` — This user can grant privileges to anyone. It's the default setting. +- `NONE` — This user can grant privileges to none. + +You can exclude any user or role by using the `EXCEPT` expression. For example, `CREATE USER user1 GRANTEES ANY EXCEPT user2`. It means if `user1` has some privileges granted with `GRANT OPTION` it will be able to grant those privileges to anyone except `user2`. ## Examples {#create-user-examples} Create the user account `mira` protected by the password `qwerty`: ``` sql -CREATE USER mira HOST IP '127.0.0.1' IDENTIFIED WITH sha256_password BY 'qwerty' +CREATE USER mira HOST IP '127.0.0.1' IDENTIFIED WITH sha256_password BY 'qwerty'; ``` `mira` should start client app at the host where the ClickHouse server runs. 
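+As an illustrative sketch of the `GRANTEES` clause described above (the account names `jack` and `jane` are assumptions), a user can be allowed to grant his privileges to anyone except one account:
+
+``` sql
+-- jack may grant the privileges he holds with GRANT OPTION to anyone except jane.
+CREATE USER jack GRANTEES ANY EXCEPT jane;
+```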
@@ -66,13 +79,13 @@ CREATE USER mira HOST IP '127.0.0.1' IDENTIFIED WITH sha256_password BY 'qwerty'

Create the user account `john`, assign roles to it and make this roles default:

``` sql
-CREATE USER john DEFAULT ROLE role1, role2
+CREATE USER john DEFAULT ROLE role1, role2;
```

Create the user account `john` and make all his future roles default:

``` sql
-CREATE USER user DEFAULT ROLE ALL
+CREATE USER john DEFAULT ROLE ALL;
```

When some role is assigned to `john` in the future, it will become default automatically.

@@ -80,5 +93,11 @@ When some role is assigned to `john` in the future, it will become autom

Create the user account `john` and make all his future roles default excepting `role1` and `role2`:

``` sql
-CREATE USER john DEFAULT ROLE ALL EXCEPT role1, role2
+CREATE USER john DEFAULT ROLE ALL EXCEPT role1, role2;
+```
+
+Create the user account `john` and allow him to grant his privileges to the user with `jack` account:
+
+``` sql
+CREATE USER john GRANTEES jack;
```
diff --git a/docs/en/sql-reference/statements/create/view.md b/docs/en/sql-reference/statements/create/view.md
index 633db355d4a..4b51bb8b067 100644
--- a/docs/en/sql-reference/statements/create/view.md
+++ b/docs/en/sql-reference/statements/create/view.md
@@ -15,7 +15,7 @@ Syntax:

CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] AS SELECT ...
```

-Normal views don’t store any data. They just perform a read from another table on each access. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause.
+Normal views do not store any data. They just perform a read from another table on each access. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause.

As an example, assume you’ve created a view:

@@ -50,9 +50,9 @@ When creating a materialized view with `TO [db].[table]`, you must not use `POPU

A materialized view is implemented as follows: when inserting data to the table specified in `SELECT`, part of the inserted data is converted by this `SELECT` query, and the result is inserted in the view.

!!! important "Important"
-    Materialized views in ClickHouse are implemented more like insert triggers. If there’s some aggregation in the view query, it’s applied only to the batch of freshly inserted data. Any changes to existing data of source table (like update, delete, drop partition, etc.) doesn’t change the materialized view.
+    Materialized views in ClickHouse are implemented more like insert triggers. If there’s some aggregation in the view query, it’s applied only to the batch of freshly inserted data. Any changes to existing data of source table (like update, delete, drop partition, etc.) do not change the materialized view.

-If you specify `POPULATE`, the existing table data is inserted in the view when creating it, as if making a `CREATE TABLE ... AS SELECT ...` .
Otherwise, the query contains only the data inserted in the table after creating the view. We **do not recommend** using POPULATE, since data inserted in the table during the view creation will not be inserted in it. A `SELECT` query can contain `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`… Note that the corresponding conversions are performed independently on each block of inserted data. For example, if `GROUP BY` is set, data is aggregated during insertion, but only within a single packet of inserted data. The data won’t be further aggregated. The exception is when using an `ENGINE` that independently performs data aggregation, such as `SummingMergeTree`. @@ -229,7 +229,7 @@ WATCH lv ``` ``` -Code: 60. DB::Exception: Received from localhost:9000. DB::Exception: Table default.lv doesn't exist.. +Code: 60. DB::Exception: Received from localhost:9000. DB::Exception: Table default.lv does not exist.. ``` ### Usage {#live-view-usage} diff --git a/docs/en/sql-reference/statements/detach.md b/docs/en/sql-reference/statements/detach.md index e9c9ed3693c..a181dd8deee 100644 --- a/docs/en/sql-reference/statements/detach.md +++ b/docs/en/sql-reference/statements/detach.md @@ -10,14 +10,14 @@ Makes the server "forget" about the existence of the table or materialized view. Syntax: ``` sql -DETACH TABLE|VIEW [IF EXISTS] [db.]name [PERMANENTLY] [ON CLUSTER cluster] +DETACH TABLE|VIEW [IF EXISTS] [db.]name [ON CLUSTER cluster] [PERMANENTLY] ``` Detaching does not delete the data or metadata for the table or materialized view. If the table or view was not detached `PERMANENTLY`, on the next server launch the server will read the metadata and recall the table/view again. If the table or view was detached `PERMANENTLY`, there will be no automatic recall. Whether the table was detached permanently or not, in both cases you can reattach it using the [ATTACH](../../sql-reference/statements/attach.md). System log tables can be also attached back (e.g. `query_log`, `text_log`, etc). Other system tables can't be reattached. On the next server launch the server will recall those tables again. -`ATTACH MATERIALIZED VIEW` doesn't work with short syntax (without `SELECT`), but you can attach it using the `ATTACH TABLE` query. +`ATTACH MATERIALIZED VIEW` does not work with short syntax (without `SELECT`), but you can attach it using the `ATTACH TABLE` query. Note that you can not detach permanently the table which is already detached (temporary). But you can attach it back and then detach permanently again. @@ -64,7 +64,7 @@ Result: ``` text Received exception from server (version 21.4.1): -Code: 60. DB::Exception: Received from localhost:9000. DB::Exception: Table default.test doesn't exist. +Code: 60. DB::Exception: Received from localhost:9000. DB::Exception: Table default.test does not exist. ``` [Original article](https://clickhouse.tech/docs/en/sql-reference/statements/detach/) diff --git a/docs/en/sql-reference/statements/drop.md b/docs/en/sql-reference/statements/drop.md index 4317a20419e..90a2a46c7cf 100644 --- a/docs/en/sql-reference/statements/drop.md +++ b/docs/en/sql-reference/statements/drop.md @@ -5,7 +5,7 @@ toc_title: DROP # DROP Statements {#drop} -Deletes existing entity. If the `IF EXISTS` clause is specified, these queries don’t return an error if the entity doesn’t exist. +Deletes existing entity. If the `IF EXISTS` clause is specified, these queries do not return an error if the entity does not exist. 
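+For example, with a hypothetical table name (an illustrative assumption), `IF EXISTS` makes the statement safe to repeat:
+
+``` sql
+-- No error is raised even if db_demo.hits was already dropped.
+DROP TABLE IF EXISTS db_demo.hits;
+```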
## DROP DATABASE {#drop-database} @@ -97,4 +97,4 @@ Syntax: DROP VIEW [IF EXISTS] [db.]name [ON CLUSTER cluster] ``` -[Оriginal article](https://clickhouse.tech/docs/en/sql-reference/statements/drop/) \ No newline at end of file +[Оriginal article](https://clickhouse.tech/docs/en/sql-reference/statements/drop/) diff --git a/docs/en/sql-reference/statements/exists.md b/docs/en/sql-reference/statements/exists.md index 3b0f4b66343..b7c4a487791 100644 --- a/docs/en/sql-reference/statements/exists.md +++ b/docs/en/sql-reference/statements/exists.md @@ -9,4 +9,4 @@ toc_title: EXISTS EXISTS [TEMPORARY] [TABLE|DICTIONARY] [db.]name [INTO OUTFILE filename] [FORMAT format] ``` -Returns a single `UInt8`-type column, which contains the single value `0` if the table or database doesn’t exist, or `1` if the table exists in the specified database. +Returns a single `UInt8`-type column, which contains the single value `0` if the table or database does not exist, or `1` if the table exists in the specified database. diff --git a/docs/en/sql-reference/statements/explain.md b/docs/en/sql-reference/statements/explain.md index 3cca29801dd..f22f92c625a 100644 --- a/docs/en/sql-reference/statements/explain.md +++ b/docs/en/sql-reference/statements/explain.md @@ -5,7 +5,7 @@ toc_title: EXPLAIN # EXPLAIN Statement {#explain} -Show the execution plan of a statement. +Shows the execution plan of a statement. Syntax: @@ -47,9 +47,9 @@ Union ### EXPLAIN AST {#explain-ast} -Dump query AST. +Dump query AST. Supports all types of queries, not only `SELECT`. -Example: +Examples: ```sql EXPLAIN AST SELECT 1; @@ -63,9 +63,25 @@ SelectWithUnionQuery (children 1) Literal UInt64_1 ``` +```sql +EXPLAIN AST ALTER TABLE t1 DELETE WHERE date = today(); +``` + +```sql + explain + AlterQuery t1 (children 1) + ExpressionList (children 1) + AlterCommand 27 (children 1) + Function equals (children 1) + ExpressionList (children 2) + Identifier date + Function today (children 1) + ExpressionList +``` + ### EXPLAIN SYNTAX {#explain-syntax} -Return query after syntax optimizations. +Returns query after syntax optimizations. Example: @@ -88,15 +104,18 @@ FROM ) AS `--.s` CROSS JOIN system.numbers AS c ``` + ### EXPLAIN PLAN {#explain-plan} Dump query plan steps. Settings: -- `header` — Print output header for step. Default: 0. -- `description` — Print step description. Default: 1. -- `actions` — Print detailed information about step actions. Default: 0. +- `header` — Prints output header for step. Default: 0. +- `description` — Prints step description. Default: 1. +- `indexes` — Shows used indexes, the number of filtered parts and the number of filtered granules for every index applied. Default: 0. Supported for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables. +- `actions` — Prints detailed information about step actions. Default: 0. +- `json` — Prints query plan steps as a row in [JSON](../../interfaces/formats.md#json) format. Default: 0. It is recommended to use [TSVRaw](../../interfaces/formats.md#tabseparatedraw) format to avoid unnecessary escaping. Example: @@ -115,15 +134,234 @@ Union ``` !!! note "Note" - Step and query cost estimation is not supported. + Step and query cost estimation is not supported. + +When `json = 1`, the query plan is represented in JSON format. Every node is a dictionary that always has the keys `Node Type` and `Plans`. `Node Type` is a string with a step name. `Plans` is an array with child step descriptions. Other optional keys may be added depending on node type and settings. 
+Example:
+
+```sql
+EXPLAIN json = 1, description = 0 SELECT 1 UNION ALL SELECT 2 FORMAT TSVRaw;
+```
+
+```json
+[
+  {
+    "Plan": {
+      "Node Type": "Union",
+      "Plans": [
+        {
+          "Node Type": "Expression",
+          "Plans": [
+            {
+              "Node Type": "SettingQuotaAndLimits",
+              "Plans": [
+                {
+                  "Node Type": "ReadFromStorage"
+                }
+              ]
+            }
+          ]
+        },
+        {
+          "Node Type": "Expression",
+          "Plans": [
+            {
+              "Node Type": "SettingQuotaAndLimits",
+              "Plans": [
+                {
+                  "Node Type": "ReadFromStorage"
+                }
+              ]
+            }
+          ]
+        }
+      ]
+    }
+  }
+]
+```
+
+With `description` = 1, the `Description` key is added to the step:
+
+```json
+{
+  "Node Type": "ReadFromStorage",
+  "Description": "SystemOne"
+}
+```
+
+With `header` = 1, the `Header` key is added to the step as an array of columns.
+
+Example:
+
+```sql
+EXPLAIN json = 1, description = 0, header = 1 SELECT 1, 2 + dummy;
+```
+
+```json
+[
+  {
+    "Plan": {
+      "Node Type": "Expression",
+      "Header": [
+        {
+          "Name": "1",
+          "Type": "UInt8"
+        },
+        {
+          "Name": "plus(2, dummy)",
+          "Type": "UInt16"
+        }
+      ],
+      "Plans": [
+        {
+          "Node Type": "SettingQuotaAndLimits",
+          "Header": [
+            {
+              "Name": "dummy",
+              "Type": "UInt8"
+            }
+          ],
+          "Plans": [
+            {
+              "Node Type": "ReadFromStorage",
+              "Header": [
+                {
+                  "Name": "dummy",
+                  "Type": "UInt8"
+                }
+              ]
+            }
+          ]
+        }
+      ]
+    }
+  }
+]
+```
+
+With `indexes` = 1, the `Indexes` key is added. It contains an array of used indexes. Each index is described as JSON with `Type` key (a string `MinMax`, `Partition`, `PrimaryKey` or `Skip`) and optional keys:
+
+- `Name` — An index name (for now, is used only for `Skip` index).
+- `Keys` — An array of columns used by the index.
+- `Condition` — A string with condition used.
+- `Description` — An index description (for now, is used only for `Skip` index).
+- `Initial Parts` — A number of parts before the index is applied.
+- `Selected Parts` — A number of parts after the index is applied.
+- `Initial Granules` — A number of granules before the index is applied.
+- `Selected Granules` — A number of granules after the index is applied.
+
+Example:
+
+```json
+"Node Type": "ReadFromMergeTree",
+"Indexes": [
+  {
+    "Type": "MinMax",
+    "Keys": ["y"],
+    "Condition": "(y in [1, +inf))",
+    "Initial Parts": 5,
+    "Selected Parts": 4,
+    "Initial Granules": 12,
+    "Selected Granules": 11
+  },
+  {
+    "Type": "Partition",
+    "Keys": ["y", "bitAnd(z, 3)"],
+    "Condition": "and((bitAnd(z, 3) not in [1, 1]), and((y in [1, +inf)), (bitAnd(z, 3) not in [1, 1])))",
+    "Initial Parts": 4,
+    "Selected Parts": 3,
+    "Initial Granules": 11,
+    "Selected Granules": 10
+  },
+  {
+    "Type": "PrimaryKey",
+    "Keys": ["x", "y"],
+    "Condition": "and((x in [11, +inf)), (y in [1, +inf)))",
+    "Initial Parts": 3,
+    "Selected Parts": 2,
+    "Initial Granules": 10,
+    "Selected Granules": 6
+  },
+  {
+    "Type": "Skip",
+    "Name": "t_minmax",
+    "Description": "minmax GRANULARITY 2",
+    "Initial Parts": 2,
+    "Selected Parts": 1,
+    "Initial Granules": 6,
+    "Selected Granules": 2
+  },
+  {
+    "Type": "Skip",
+    "Name": "t_set",
+    "Description": "set GRANULARITY 2",
+    "Initial Parts": 1,
+    "Selected Parts": 1,
+    "Initial Granules": 2,
+    "Selected Granules": 1
+  }
+]
+```
+
+With `actions` = 1, added keys depend on step type.
+
+Example:
+
+```sql
+EXPLAIN json = 1, actions = 1, description = 0 SELECT 1 FORMAT TSVRaw;
+```
+
+```json
+[
+  {
+    "Plan": {
+      "Node Type": "Expression",
+      "Expression": {
+        "Inputs": [],
+        "Actions": [
+          {
+            "Node Type": "Column",
+            "Result Type": "UInt8",
+            "Result Name": "1",
+            "Column": "Const(UInt8)",
+            "Arguments": [],
+            "Removed Arguments": [],
+            "Result": 0
+          }
+        ],
+        "Outputs": [
+          {
+            "Name": "1",
+            "Type": "UInt8"
+          }
+        ],
+        "Positions": [0],
+        "Project Input": true
+      },
+      "Plans": [
+        {
+          "Node Type": "SettingQuotaAndLimits",
+          "Plans": [
+            {
+              "Node Type": "ReadFromStorage"
+            }
+          ]
+        }
+      ]
+    }
+  }
+]
+```

### EXPLAIN PIPELINE {#explain-pipeline}

Settings:

-- `header` — Print header for each output port. Default: 0.
-- `graph` — Use DOT graph description language. Default: 0.
-- `compact` — Print graph in compact mode if graph is enabled. Default: 1.
+- `header` — Prints header for each output port. Default: 0.
+- `graph` — Prints a graph described in the [DOT](https://en.wikipedia.org/wiki/DOT_(graph_description_language)) graph description language. Default: 0.
+- `compact` — Prints graph in compact mode if `graph` setting is enabled. Default: 1.

Example:
diff --git a/docs/en/sql-reference/statements/grant.md b/docs/en/sql-reference/statements/grant.md
index 0afc9b5b95f..8ca2b25ce66 100644
--- a/docs/en/sql-reference/statements/grant.md
+++ b/docs/en/sql-reference/statements/grant.md
@@ -49,7 +49,7 @@ It means that `john` has the permission to execute:

- `SELECT x FROM db.table`.
- `SELECT y FROM db.table`.

-`john` can’t execute `SELECT z FROM db.table`. The `SELECT * FROM db.table` also is not available. Processing this query, ClickHouse doesn’t return any data, even `x` and `y`. The only exception is if a table contains only `x` and `y` columns. In this case ClickHouse returns all the data.
+`john` can’t execute `SELECT z FROM db.table`. The `SELECT * FROM db.table` also is not available. Processing this query, ClickHouse does not return any data, even `x` and `y`. The only exception is if a table contains only `x` and `y` columns. In this case ClickHouse returns all the data.

Also `john` has the `GRANT OPTION` privilege, so it can grant other users with privileges of the same or smaller scope.

@@ -91,7 +91,7 @@ Hierarchy of privileges:
    - `ALTER ADD CONSTRAINT`
    - `ALTER DROP CONSTRAINT`
    - `ALTER TTL`
-    - `ALTER MATERIALIZE TTL`
+    - `ALTER MATERIALIZE TTL`
    - `ALTER SETTINGS`
    - `ALTER MOVE PARTITION`
    - `ALTER FETCH PARTITION`
@@ -102,9 +102,9 @@ Hierarchy of privileges:
- [CREATE](#grant-create)
    - `CREATE DATABASE`
    - `CREATE TABLE`
+        - `CREATE TEMPORARY TABLE`
    - `CREATE VIEW`
    - `CREATE DICTIONARY`
-    - `CREATE TEMPORARY TABLE`
- [DROP](#grant-drop)
    - `DROP DATABASE`
    - `DROP TABLE`
@@ -150,7 +150,7 @@ Hierarchy of privileges:
    - `SYSTEM RELOAD`
    - `SYSTEM RELOAD CONFIG`
    - `SYSTEM RELOAD DICTIONARY`
-    - `SYSTEM RELOAD EMBEDDED DICTIONARIES`
+    - `SYSTEM RELOAD EMBEDDED DICTIONARIES`
    - `SYSTEM MERGES`
    - `SYSTEM TTL MERGES`
    - `SYSTEM FETCHES`
@@ -230,7 +230,7 @@ Consider the following privilege:

GRANT SELECT(x,y) ON db.table TO john

-This privilege allows `john` to execute any `SELECT` query that involves data from the `x` and/or `y` columns in `db.table`, for example, `SELECT x FROM db.table`. `john` can’t execute `SELECT z FROM db.table`. The `SELECT * FROM db.table` also is not available. Processing this query, ClickHouse doesn’t return any data, even `x` and `y`.
The only exception is if a table contains only `x` and `y` columns, in this case ClickHouse returns all the data.
+This privilege allows `john` to execute any `SELECT` query that involves data from the `x` and/or `y` columns in `db.table`, for example, `SELECT x FROM db.table`. `john` can’t execute `SELECT z FROM db.table`. The `SELECT * FROM db.table` also is not available. Processing this query, ClickHouse does not return any data, even `x` and `y`. The only exception is if a table contains only `x` and `y` columns, in this case ClickHouse returns all the data.

### INSERT {#grant-insert}

@@ -240,7 +240,7 @@ Privilege level: `COLUMN`.

**Description**

-User granted with this privilege can execute `INSERT` queries over a specified list of columns in the specified table and database. If user includes other columns then specified a query doesn’t insert any data.
+User granted with this privilege can execute `INSERT` queries over a specified list of columns in the specified table and database. If the user includes other columns than specified, the query does not insert any data.

**Example**

@@ -276,10 +276,10 @@ Allows executing [ALTER](../../sql-reference/statements/alter/index.md) queries
    - `ALTER ADD CONSTRAINT`. Level: `TABLE`. Aliases: `ADD CONSTRAINT`
    - `ALTER DROP CONSTRAINT`. Level: `TABLE`. Aliases: `DROP CONSTRAINT`
    - `ALTER TTL`. Level: `TABLE`. Aliases: `ALTER MODIFY TTL`, `MODIFY TTL`
-    - `ALTER MATERIALIZE TTL`. Level: `TABLE`. Aliases: `MATERIALIZE TTL`
+    - `ALTER MATERIALIZE TTL`. Level: `TABLE`. Aliases: `MATERIALIZE TTL`
    - `ALTER SETTINGS`. Level: `TABLE`. Aliases: `ALTER SETTING`, `ALTER MODIFY SETTING`, `MODIFY SETTING`
    - `ALTER MOVE PARTITION`. Level: `TABLE`. Aliases: `ALTER MOVE PART`, `MOVE PARTITION`, `MOVE PART`
-    - `ALTER FETCH PARTITION`. Level: `TABLE`. Aliases: `FETCH PARTITION`
+    - `ALTER FETCH PARTITION`. Level: `TABLE`. Aliases: `ALTER FETCH PART`, `FETCH PARTITION`, `FETCH PART`
    - `ALTER FREEZE PARTITION`. Level: `TABLE`. Aliases: `FREEZE PARTITION`
- `ALTER VIEW` Level: `GROUP`
    - `ALTER VIEW REFRESH`. Level: `VIEW`. Aliases: `ALTER LIVE VIEW REFRESH`, `REFRESH VIEW`
@@ -292,7 +292,7 @@ Examples of how this hierarchy is treated:

**Notes**

-- The `MODIFY SETTING` privilege allows modifying table engine settings. It doesn’t affect settings or server configuration parameters.
+- The `MODIFY SETTING` privilege allows modifying table engine settings. It does not affect settings or server configuration parameters.
- The `ATTACH` operation needs the [CREATE](#grant-create) privilege.
- The `DETACH` operation needs the [DROP](#grant-drop) privilege.
- To stop mutation by the [KILL MUTATION](../../sql-reference/statements/misc.md#kill-mutation) query, you need to have a privilege to start this mutation. For example, if you want to stop the `ALTER UPDATE` query, you need the `ALTER UPDATE`, `ALTER TABLE`, or `ALTER` privilege.

@@ -304,9 +304,9 @@ Allows executing [CREATE](../../sql-reference/statements/create/index.md) and [A

- `CREATE`. Level: `GROUP`
    - `CREATE DATABASE`. Level: `DATABASE`
    - `CREATE TABLE`. Level: `TABLE`
+        - `CREATE TEMPORARY TABLE`. Level: `GLOBAL`
    - `CREATE VIEW`. Level: `VIEW`
    - `CREATE DICTIONARY`. Level: `DICTIONARY`
-    - `CREATE TEMPORARY TABLE`.
Level: `GLOBAL` **Notes** @@ -316,7 +316,7 @@ Allows executing [CREATE](../../sql-reference/statements/create/index.md) and [A Allows executing [DROP](../../sql-reference/statements/misc.md#drop) and [DETACH](../../sql-reference/statements/misc.md#detach) queries according to the following hierarchy of privileges: -- `DROP`. Level: +- `DROP`. Level: `GROUP` - `DROP DATABASE`. Level: `DATABASE` - `DROP TABLE`. Level: `TABLE` - `DROP VIEW`. Level: `VIEW` @@ -401,7 +401,7 @@ Allows a user to execute [SYSTEM](../../sql-reference/statements/system.md) quer - `SYSTEM RELOAD`. Level: `GROUP` - `SYSTEM RELOAD CONFIG`. Level: `GLOBAL`. Aliases: `RELOAD CONFIG` - `SYSTEM RELOAD DICTIONARY`. Level: `GLOBAL`. Aliases: `SYSTEM RELOAD DICTIONARIES`, `RELOAD DICTIONARY`, `RELOAD DICTIONARIES` - - `SYSTEM RELOAD EMBEDDED DICTIONARIES`. Level: `GLOBAL`. Aliases: R`ELOAD EMBEDDED DICTIONARIES` + - `SYSTEM RELOAD EMBEDDED DICTIONARIES`. Level: `GLOBAL`. Aliases: `RELOAD EMBEDDED DICTIONARIES` - `SYSTEM MERGES`. Level: `TABLE`. Aliases: `SYSTEM STOP MERGES`, `SYSTEM START MERGES`, `STOP MERGES`, `START MERGES` - `SYSTEM TTL MERGES`. Level: `TABLE`. Aliases: `SYSTEM STOP TTL MERGES`, `SYSTEM START TTL MERGES`, `STOP TTL MERGES`, `START TTL MERGES` - `SYSTEM FETCHES`. Level: `TABLE`. Aliases: `SYSTEM STOP FETCHES`, `SYSTEM START FETCHES`, `STOP FETCHES`, `START FETCHES` diff --git a/docs/en/sql-reference/statements/insert-into.md b/docs/en/sql-reference/statements/insert-into.md index 66effcccc3f..db10ddd47c6 100644 --- a/docs/en/sql-reference/statements/insert-into.md +++ b/docs/en/sql-reference/statements/insert-into.md @@ -57,7 +57,7 @@ SELECT * FROM insert_select_testtable; In this example, we see that the second inserted row has `a` and `c` columns filled by the passed values, and `b` filled with value by default. -If a list of columns doesn't include all existing columns, the rest of the columns are filled with: +If a list of columns does not include all existing columns, the rest of the columns are filled with: - The values calculated from the `DEFAULT` expressions specified in the table definition. - Zeros and empty strings, if `DEFAULT` expressions are not defined. @@ -105,6 +105,8 @@ However, you can delete old data using `ALTER TABLE ... DROP PARTITION`. `FORMAT` clause must be specified in the end of query if `SELECT` clause contains table function [input()](../../sql-reference/table-functions/input.md). +To insert a default value instead of `NULL` into a column with not nullable data type, enable [insert_null_as_default](../../operations/settings/settings.md#insert_null_as_default) setting. + ### Performance Considerations {#performance-considerations} `INSERT` sorts the input data by primary key and splits them into partitions by a partition key. If you insert data into several partitions at once, it can significantly reduce the performance of the `INSERT` query. To avoid this: diff --git a/docs/en/sql-reference/statements/kill.md b/docs/en/sql-reference/statements/kill.md index 6aa09cca4ef..eab6f602c4a 100644 --- a/docs/en/sql-reference/statements/kill.md +++ b/docs/en/sql-reference/statements/kill.md @@ -31,7 +31,7 @@ KILL QUERY WHERE user='username' SYNC Read-only users can only stop their own queries. -By default, the asynchronous version of queries is used (`ASYNC`), which doesn’t wait for confirmation that queries have stopped. +By default, the asynchronous version of queries is used (`ASYNC`), which does not wait for confirmation that queries have stopped. 
The synchronous version (`SYNC`) waits for all queries to stop and displays information about each process as it stops. The response contains the `kill_status` column, which can take the following values: diff --git a/docs/en/sql-reference/statements/optimize.md b/docs/en/sql-reference/statements/optimize.md index 49a7404d76e..69e2caeb322 100644 --- a/docs/en/sql-reference/statements/optimize.md +++ b/docs/en/sql-reference/statements/optimize.md @@ -5,28 +5,34 @@ toc_title: OPTIMIZE # OPTIMIZE Statement {#misc_operations-optimize} +This query tries to initialize an unscheduled merge of data parts for tables. + +!!! warning "Warning" + `OPTIMIZE` can’t fix the `Too many parts` error. + +**Syntax** + ``` sql OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE [BY expression]] ``` -This query tries to initialize an unscheduled merge of data parts for tables with a table engine from the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family. - -The `OPTMIZE` query is also supported for the [MaterializedView](../../engines/table-engines/special/materializedview.md) and the [Buffer](../../engines/table-engines/special/buffer.md) engines. Other table engines aren’t supported. +The `OPTMIZE` query is supported for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family, the [MaterializedView](../../engines/table-engines/special/materializedview.md) and the [Buffer](../../engines/table-engines/special/buffer.md) engines. Other table engines aren’t supported. When `OPTIMIZE` is used with the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family of table engines, ClickHouse creates a task for merging and waits for execution on all nodes (if the `replication_alter_partitions_sync` setting is enabled). -- If `OPTIMIZE` doesn’t perform a merge for any reason, it doesn’t notify the client. To enable notifications, use the [optimize_throw_if_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) setting. +- If `OPTIMIZE` does not perform a merge for any reason, it does not notify the client. To enable notifications, use the [optimize_throw_if_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) setting. - If you specify a `PARTITION`, only the specified partition is optimized. [How to set partition expression](../../sql-reference/statements/alter/index.md#alter-how-to-specify-part-expr). - If you specify `FINAL`, optimization is performed even when all the data is already in one part. Also merge is forced even if concurrent merges are performed. - If you specify `DEDUPLICATE`, then completely identical rows (unless by-clause is specified) will be deduplicated (all columns are compared), it makes sense only for the MergeTree engine. -### BY expression {#by-expression} +## BY expression {#by-expression} If you want to perform deduplication on custom set of columns rather than on all, you can specify list of columns explicitly or use any combination of [`*`](../../sql-reference/statements/select/index.md#asterisk), [`COLUMNS`](../../sql-reference/statements/select/index.md#columns-expression) or [`EXCEPT`](../../sql-reference/statements/select/index.md#except-modifier) expressions. The explictly written or implicitly expanded list of columns must include all columns specified in row ordering expression (both primary and sorting keys) and partitioning expression (partitioning key). 
-Note that `*` behaves just like in `SELECT`: `MATERIALIZED`, and `ALIAS` columns are not used for expansion. -Also, it is an error to specify empty list of columns, or write an expression that results in an empty list of columns, or deduplicate by an ALIAS column. +!!! note "Note" + Notice that `*` behaves just like in `SELECT`: `MATERIALIZED` and `ALIAS` columns are not used for expansion. + Also, it is an error to specify empty list of columns, or write an expression that results in an empty list of columns, or deduplicate by an ALIAS column. ``` sql OPTIMIZE TABLE table DEDUPLICATE; -- the old one @@ -39,9 +45,10 @@ OPTIMIZE TABLE table DEDUPLICATE BY COLUMNS('column-matched-by-regex') EXCEPT co OPTIMIZE TABLE table DEDUPLICATE BY COLUMNS('column-matched-by-regex') EXCEPT (colX, colY); ``` -**Example:** +**Examples** + +Create a table: -A silly synthetic table. ``` sql CREATE TABLE example ( primary_key Int32, secondary_key Int32, value UInt32, partition_key UInt32, materialized_value UInt32 MATERIALIZED 12345, aliased_value UInt32 ALIAS 2, PRIMARY KEY primary_key ) ENGINE=MergeTree PARTITION BY partition_key ORDER BY (primary_key, secondary_key); ``` +The 'old' deduplication: all columns are taken into account, i.e. a row is removed only if all values in all columns are equal to the corresponding values in the previous row. + ``` sql --- The 'old' deduplicate, all columns are taken into account, i.e. row is removed only if all values in all columns are equal to corresponding values in previous row. OPTIMIZE TABLE example FINAL DEDUPLICATE; ``` +Deduplicate by all columns that are not `ALIAS` or `MATERIALIZED`: `primary_key`, `secondary_key`, `value`, `partition_key`, and `materialized_value` columns. + ``` sql --- Deduplicate by all columns that are not `ALIAS` or `MATERIALIZED`: `primary_key`, `secondary_key`, `value`, `partition_key`, and `materialized_value` columns. OPTIMIZE TABLE example FINAL DEDUPLICATE BY *; ``` +Deduplicate by all columns that are not `ALIAS` or `MATERIALIZED` and explicitly not `materialized_value`: `primary_key`, `secondary_key`, `value`, and `partition_key` columns. + ``` sql --- Deduplicate by all columns that are not `ALIAS` or `MATERIALIZED` and explicitly not `materialized_value`: `primary_key`, `secondary_key`, `value`, and `partition_key` columns. OPTIMIZE TABLE example FINAL DEDUPLICATE BY * EXCEPT materialized_value; ``` +Deduplicate explicitly by `primary_key`, `secondary_key`, and `partition_key` columns. ``` sql --- Deduplicate explicitly by `primary_key`, `secondary_key`, and `partition_key` columns. OPTIMIZE TABLE example FINAL DEDUPLICATE BY primary_key, secondary_key, partition_key; ``` +Deduplicate by any column matching a regex: `primary_key`, `secondary_key`, and `partition_key` columns. + ``` sql --- Deduplicate by any column matching a regex: `primary_key`, `secondary_key`, and `partition_key` columns. OPTIMIZE TABLE example FINAL DEDUPLICATE BY COLUMNS('.*_key'); ``` - - -!!! warning "Warning" - `OPTIMIZE` can’t fix the “Too many parts” error. diff --git a/docs/en/sql-reference/statements/rename.md b/docs/en/sql-reference/statements/rename.md index 4f14ad016a3..4f454626493 100644 --- a/docs/en/sql-reference/statements/rename.md +++ b/docs/en/sql-reference/statements/rename.md @@ -5,10 +5,18 @@ toc_title: RENAME # RENAME Statement {#misc_operations-rename} +## RENAME DATABASE {#misc_operations-rename_database} +Renames a database. Supported only for the Atomic database engine. + +``` +RENAME DATABASE atomic_database1 TO atomic_database2 [ON CLUSTER cluster] +``` + +## RENAME TABLE {#misc_operations-rename_table} Renames one or more tables.
``` sql RENAME TABLE [db11.]name11 TO [db12.]name12, [db21.]name21 TO [db22.]name22, ... [ON CLUSTER cluster] ``` -Renaming tables is a light operation. If you indicated another database after `TO`, the table will be moved to this database. However, the directories with databases must reside in the same file system (otherwise, an error is returned). If you rename multiple tables in one query, this is a non-atomic operation, it may be partially executed, queries in other sessions may receive the error `Table ... doesn't exist ..`. +Renaming tables is a light operation. If you indicate another database after `TO`, the table is moved to this database. However, the directories with databases must reside in the same file system (otherwise, an error is returned). Renaming multiple tables in one query is not an atomic operation: it may be partially executed, and queries in other sessions may receive the error `Table ... does not exist ..`. diff --git a/docs/en/sql-reference/statements/select/from.md b/docs/en/sql-reference/statements/select/from.md index 3ecb5096ab8..7c5ea732122 100644 --- a/docs/en/sql-reference/statements/select/from.md +++ b/docs/en/sql-reference/statements/select/from.md @@ -29,7 +29,7 @@ Now `SELECT` queries with `FINAL` are executed in parallel and slightly faster. ### Drawbacks {#drawbacks} -Queries that use `FINAL` are executed slightly slower than similar queries that don’t, because: +Queries that use `FINAL` are executed slightly slower than similar queries that do not, because: - Data is merged during query execution. - Queries with `FINAL` read primary key columns in addition to the columns specified in the query. diff --git a/docs/en/sql-reference/statements/select/group-by.md b/docs/en/sql-reference/statements/select/group-by.md index a07c810fae8..e6affc07b78 100644 --- a/docs/en/sql-reference/statements/select/group-by.md +++ b/docs/en/sql-reference/statements/select/group-by.md @@ -208,7 +208,7 @@ This extra row is only produced in `JSON*`, `TabSeparated*`, and `Pretty*` forma ### Configuring Totals Processing {#configuring-totals-processing} -By default, `totals_mode = 'before_having'`. In this case, ‘totals’ is calculated across all rows, including the ones that don’t pass through HAVING and `max_rows_to_group_by`. +By default, `totals_mode = 'before_having'`. In this case, ‘totals’ is calculated across all rows, including the ones that do not pass through HAVING and `max_rows_to_group_by`. The other alternatives include only the rows that pass through HAVING in ‘totals’, and behave differently with the setting `max_rows_to_group_by` and `group_by_overflow_mode = 'any'`. @@ -274,4 +274,4 @@ When merging data flushed to the disk, as well as when merging results from remo When external aggregation is enabled, if there was less than `max_bytes_before_external_group_by` of data (i.e. data was not flushed), the query runs just as fast as without external aggregation. If any temporary data was flushed, the run time will be several times longer (approximately three times). -If you have an [ORDER BY](../../../sql-reference/statements/select/order-by.md) with a [LIMIT](../../../sql-reference/statements/select/limit.md) after `GROUP BY`, then the amount of used RAM depends on the amount of data in `LIMIT`, not in the whole table. But if the `ORDER BY` doesn’t have `LIMIT`, don’t forget to enable external sorting (`max_bytes_before_external_sort`).
+If you have an [ORDER BY](../../../sql-reference/statements/select/order-by.md) with a [LIMIT](../../../sql-reference/statements/select/limit.md) after `GROUP BY`, then the amount of used RAM depends on the amount of data in `LIMIT`, not in the whole table. But if the `ORDER BY` does not have `LIMIT`, do not forget to enable external sorting (`max_bytes_before_external_sort`). diff --git a/docs/en/sql-reference/statements/select/index.md b/docs/en/sql-reference/statements/select/index.md index ada4699c224..2f2ce943225 100644 --- a/docs/en/sql-reference/statements/select/index.md +++ b/docs/en/sql-reference/statements/select/index.md @@ -47,6 +47,7 @@ Specifics of each optional clause are covered in separate sections, which are li - [SELECT clause](#select-clause) - [DISTINCT clause](../../../sql-reference/statements/select/distinct.md) - [LIMIT clause](../../../sql-reference/statements/select/limit.md) +- [OFFSET clause](../../../sql-reference/statements/select/offset.md) - [UNION clause](../../../sql-reference/statements/select/union.md) - [INTO OUTFILE clause](../../../sql-reference/statements/select/into-outfile.md) - [FORMAT clause](../../../sql-reference/statements/select/format.md) @@ -100,7 +101,7 @@ SELECT COLUMNS('a'), COLUMNS('c'), toTypeName(COLUMNS('c')) FROM col_names └────┴────┴────┴────────────────┘ ``` -Each column returned by the `COLUMNS` expression is passed to the function as a separate argument. Also you can pass other arguments to the function if it supports them. Be careful when using functions. If a function doesn’t support the number of arguments you have passed to it, ClickHouse throws an exception. +Each column returned by the `COLUMNS` expression is passed to the function as a separate argument. Also you can pass other arguments to the function if it supports them. Be careful when using functions. If a function does not support the number of arguments you have passed to it, ClickHouse throws an exception. For example: @@ -110,12 +111,12 @@ SELECT COLUMNS('a') + COLUMNS('c') FROM col_names ``` text Received exception from server (version 19.14.1): -Code: 42. DB::Exception: Received from localhost:9000. DB::Exception: Number of arguments for function plus doesn't match: passed 3, should be 2. +Code: 42. DB::Exception: Received from localhost:9000. DB::Exception: Number of arguments for function plus does not match: passed 3, should be 2. ``` In this example, `COLUMNS('a')` returns two columns: `aa` and `ab`. `COLUMNS('c')` returns the `bc` column. The `+` operator can’t apply to 3 arguments, so ClickHouse throws an exception with the relevant message. -Columns that matched the `COLUMNS` expression can have different data types. If `COLUMNS` doesn’t match any columns and is the only expression in `SELECT`, ClickHouse throws an exception. +Columns that matched the `COLUMNS` expression can have different data types. If `COLUMNS` does not match any columns and is the only expression in `SELECT`, ClickHouse throws an exception. ### Asterisk {#asterisk} @@ -127,7 +128,7 @@ You can put an asterisk in any part of a query instead of an expression. When th - When there is strong filtration on a small number of columns using `PREWHERE`. - In subqueries (since columns that aren’t needed for the external query are excluded from subqueries). -In all other cases, we don’t recommend using the asterisk, since it only gives you the drawbacks of a columnar DBMS instead of the advantages. In other words using the asterisk is not recommended. 
+In all other cases, we do not recommend using the asterisk, since it only gives you the drawbacks of a columnar DBMS instead of the advantages. In other words, using the asterisk is not recommended. ### Extreme Values {#extreme-values} diff --git a/docs/en/sql-reference/statements/select/limit.md b/docs/en/sql-reference/statements/select/limit.md index 4b25efbe95a..6ed38b2dd64 100644 --- a/docs/en/sql-reference/statements/select/limit.md +++ b/docs/en/sql-reference/statements/select/limit.md @@ -12,6 +12,9 @@ toc_title: LIMIT If there is no [ORDER BY](../../../sql-reference/statements/select/order-by.md) clause that explicitly sorts results, the choice of rows for the result may be arbitrary and non-deterministic. +!!! note "Note" + The number of rows in the result set can also depend on the [limit](../../../operations/settings/settings.md#limit) setting. + ## LIMIT … WITH TIES Modifier {#limit-with-ties} When you set `WITH TIES` modifier for `LIMIT n[,m]` and specify `ORDER BY expr_list`, you will get in result first `n` or `n,m` rows and all rows with same `ORDER BY` fields values equal to row at position `n` for `LIMIT n` and `m` for `LIMIT n,m`. diff --git a/docs/en/sql-reference/statements/select/offset.md b/docs/en/sql-reference/statements/select/offset.md new file mode 100644 index 00000000000..3efd916bcb8 --- /dev/null +++ b/docs/en/sql-reference/statements/select/offset.md @@ -0,0 +1,86 @@ +--- +toc_title: OFFSET +--- + +# OFFSET FETCH Clause {#offset-fetch} + +`OFFSET` and `FETCH` allow you to retrieve data by portions. They specify a row block which you want to get by a single query. + +``` sql +OFFSET offset_row_count {ROW | ROWS} [FETCH {FIRST | NEXT} fetch_row_count {ROW | ROWS} {ONLY | WITH TIES}] +``` + +The `offset_row_count` or `fetch_row_count` value can be a number or a literal constant. You can omit `fetch_row_count`; by default, it equals 1. + +`OFFSET` specifies the number of rows to skip before starting to return rows from the query result set. + +`FETCH` specifies the maximum number of rows that can be in the result of a query. + +The `ONLY` option is used to return rows that immediately follow the rows omitted by the `OFFSET`. In this case the `FETCH` is an alternative to the [LIMIT](../../../sql-reference/statements/select/limit.md) clause. For example, the following query + +``` sql +SELECT * FROM test_fetch ORDER BY a OFFSET 1 ROW FETCH FIRST 3 ROWS ONLY; +``` + +is identical to the query + +``` sql +SELECT * FROM test_fetch ORDER BY a LIMIT 3 OFFSET 1; +``` + +The `WITH TIES` option is used to return any additional rows that tie for the last place in the result set according to the `ORDER BY` clause. For example, if `fetch_row_count` is set to 5 but two additional rows match the values of the `ORDER BY` columns in the fifth row, the result set will contain seven rows. + +!!! note "Note" + According to the standard, the `OFFSET` clause must come before the `FETCH` clause if both are present. + +!!! note "Note" + The real offset can also depend on the [offset](../../../operations/settings/settings.md#offset) setting.
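As a sketch of that interplay (assuming, per the settings documentation, that the value of the `offset` setting is added to the offset given in the query itself):

``` sql
-- Assumption: the session-level `offset` setting and the OFFSET clause are summed.
SET offset = 2;
SELECT * FROM test_fetch ORDER BY a OFFSET 1 ROW FETCH FIRST 3 ROWS ONLY;
-- expected to skip 1 + 2 = 3 rows in total before returning up to 3 rows
```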
+ +## Examples {#examples} + +Input table: + +``` text +┌─a─┬─b─┐ +│ 1 │ 1 │ +│ 2 │ 1 │ +│ 3 │ 4 │ +│ 1 │ 3 │ +│ 5 │ 4 │ +│ 0 │ 6 │ +│ 5 │ 7 │ +└───┴───┘ +``` + +Usage of the `ONLY` option: + +``` sql +SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROW FETCH FIRST 3 ROWS ONLY; +``` + +Result: + +``` text +┌─a─┬─b─┐ +│ 2 │ 1 │ +│ 3 │ 4 │ +│ 5 │ 4 │ +└───┴───┘ +``` + +Usage of the `WITH TIES` option: + +``` sql +SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROW FETCH FIRST 3 ROWS WITH TIES; +``` + +Result: + +``` text +┌─a─┬─b─┐ +│ 2 │ 1 │ +│ 3 │ 4 │ +│ 5 │ 4 │ +│ 5 │ 7 │ +└───┴───┘ +``` diff --git a/docs/en/sql-reference/statements/select/order-by.md b/docs/en/sql-reference/statements/select/order-by.md index fb1df445db1..a8fec5cfa26 100644 --- a/docs/en/sql-reference/statements/select/order-by.md +++ b/docs/en/sql-reference/statements/select/order-by.md @@ -252,11 +252,11 @@ External sorting works much less effectively than sorting in RAM. If `ORDER BY` expression has a prefix that coincides with the table sorting key, you can optimize the query by using the [optimize_read_in_order](../../../operations/settings/settings.md#optimize_read_in_order) setting. - When the `optimize_read_in_order` setting is enabled, the Clickhouse server uses the table index and reads the data in order of the `ORDER BY` key. This allows to avoid reading all data in case of specified [LIMIT](../../../sql-reference/statements/select/limit.md). So queries on big data with small limit are processed faster. + When the `optimize_read_in_order` setting is enabled, the ClickHouse server uses the table index and reads the data in order of the `ORDER BY` key. This makes it possible to avoid reading all data when a [LIMIT](../../../sql-reference/statements/select/limit.md) is specified, so queries on big data with a small limit are processed faster. -Optimization works with both `ASC` and `DESC` and doesn't work together with [GROUP BY](../../../sql-reference/statements/select/group-by.md) clause and [FINAL](../../../sql-reference/statements/select/from.md#select-from-final) modifier. +Optimization works with both `ASC` and `DESC`, but does not work together with the [GROUP BY](../../../sql-reference/statements/select/group-by.md) clause or the [FINAL](../../../sql-reference/statements/select/from.md#select-from-final) modifier. -When the `optimize_read_in_order` setting is disabled, the Clickhouse server does not use the table index while processing `SELECT` queries. +When the `optimize_read_in_order` setting is disabled, the ClickHouse server does not use the table index while processing `SELECT` queries. Consider disabling `optimize_read_in_order` manually, when running queries that have `ORDER BY` clause, large `LIMIT` and [WHERE](../../../sql-reference/statements/select/where.md) condition that requires to read huge amount of records before queried data is found. @@ -265,7 +265,7 @@ Optimization is supported in the following table engines: - [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) - [Merge](../../../engines/table-engines/special/merge.md), [Buffer](../../../engines/table-engines/special/buffer.md), and [MaterializedView](../../../engines/table-engines/special/materializedview.md) table engines over `MergeTree`-engine tables -In `MaterializedView`-engine tables the optimization works with views like `SELECT ... FROM merge_tree_table ORDER BY pk`. But it is not supported in the queries like `SELECT ... FROM view ORDER BY pk` if the view query doesn't have the `ORDER BY` clause.
+In `MaterializedView`-engine tables the optimization works with views like `SELECT ... FROM merge_tree_table ORDER BY pk`. But it is not supported in the queries like `SELECT ... FROM view ORDER BY pk` if the view query does not have the `ORDER BY` clause. ## ORDER BY Expr WITH FILL Modifier {#orderby-with-fill} @@ -364,7 +364,7 @@ returns └────────────┴────────────┴──────────┘ ``` -Field `d1` doesn’t fill and use default value cause we don’t have repeated values for `d2` value, and sequence for `d1` can’t be properly calculated. +Field `d1` is not filled and uses the default value, because we do not have repeated values for `d2`, and the sequence for `d1` cannot be properly calculated. The following query with a changed field in `ORDER BY` @@ -400,84 +400,4 @@ returns └────────────┴────────────┴──────────┘ ``` -## OFFSET FETCH Clause {#offset-fetch} - -`OFFSET` and `FETCH` allow you to retrieve data by portions. They specify a row block which you want to get by a single query. - -``` sql -OFFSET offset_row_count {ROW | ROWS}] [FETCH {FIRST | NEXT} fetch_row_count {ROW | ROWS} {ONLY | WITH TIES}] -``` - -The `offset_row_count` or `fetch_row_count` value can be a number or a literal constant. You can omit `fetch_row_count`; by default, it equals 1. - -`OFFSET` specifies the number of rows to skip before starting to return rows from the query. - -The `FETCH` specifies the maximum number of rows that can be in the result of a query. - -The `ONLY` option is used to return rows that immediately follow the rows omitted by the `OFFSET`. In this case the `FETCH` is an alternative to the [LIMIT](../../../sql-reference/statements/select/limit.md) clause. For example, the following query - -``` sql -SELECT * FROM test_fetch ORDER BY a OFFSET 1 ROW FETCH FIRST 3 ROWS ONLY; -``` - -is identical to the query - -``` sql -SELECT * FROM test_fetch ORDER BY a LIMIT 3 OFFSET 1; -``` - -The `WITH TIES` option is used to return any additional rows that tie for the last place in the result set according to the `ORDER BY` clause. For example, if `fetch_row_count` is set to 5 but two additional rows match the values of the `ORDER BY` columns in the fifth row, the result set will contain seven rows. - -!!! note "Note" - According to the standard, the `OFFSET` clause must come before the `FETCH` clause if both are present. - -### Examples {#examples} - -Input table: - -``` text -┌─a─┬─b─┐ -│ 1 │ 1 │ -│ 2 │ 1 │ -│ 3 │ 4 │ -│ 1 │ 3 │ -│ 5 │ 4 │ -│ 0 │ 6 │ -│ 5 │ 7 │ -└───┴───┘ -``` - -Usage of the `ONLY` option: - -``` sql -SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROW FETCH FIRST 3 ROWS ONLY; -``` - -Result: - -``` text -┌─a─┬─b─┐ -│ 2 │ 1 │ -│ 3 │ 4 │ -│ 5 │ 4 │ -└───┴───┘ -``` - -Usage of the `WITH TIES` option: - -``` sql -SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROW FETCH FIRST 3 ROWS WITH TIES; -``` - -Result: - -``` text -┌─a─┬─b─┐ -│ 2 │ 1 │ -│ 3 │ 4 │ -│ 5 │ 4 │ -│ 5 │ 7 │ -└───┴───┘ -``` - [Original article](https://clickhouse.tech/docs/en/sql-reference/statements/select/order-by/) diff --git a/docs/en/sql-reference/statements/select/prewhere.md b/docs/en/sql-reference/statements/select/prewhere.md index fc43d1de0a1..663b84f2d48 100644 --- a/docs/en/sql-reference/statements/select/prewhere.md +++ b/docs/en/sql-reference/statements/select/prewhere.md @@ -16,6 +16,9 @@ A query may simultaneously specify `PREWHERE` and `WHERE`. In this case, `PREWHE If the `optimize_move_to_prewhere` setting is set to 0, heuristics to automatically move parts of expressions from `WHERE` to `PREWHERE` are disabled. +!!! note "Attention"
+ The `PREWHERE` section is executed before `FINAL`, so the results of `FROM FINAL` queries may be skewed when using `PREWHERE` with fields not in the `ORDER BY` section of a table. + ## Limitations {#limitations} `PREWHERE` is only supported by tables from the `*MergeTree` family. diff --git a/docs/en/sql-reference/statements/select/sample.md b/docs/en/sql-reference/statements/select/sample.md index 55c1919b81d..2ed0a804736 100644 --- a/docs/en/sql-reference/statements/select/sample.md +++ b/docs/en/sql-reference/statements/select/sample.md @@ -11,7 +11,7 @@ When data sampling is enabled, the query is not performed on all the data, but o Approximated query processing can be useful in the following cases: - When you have strict timing requirements (like \<100ms) but you can’t justify the cost of additional hardware resources to meet them. -- When your raw data is not accurate, so approximation doesn’t noticeably degrade the quality. +- When your raw data is not accurate, so approximation does not noticeably degrade the quality. - Business requirements target approximate results (for cost-effectiveness, or to market exact results to premium users). !!! note "Note" @@ -59,7 +59,7 @@ In this case, the query is executed on a sample of at least `n` rows (but not si Since the minimum unit for data reading is one granule (its size is set by the `index_granularity` setting), it makes sense to set a sample that is much larger than the size of the granule. -When using the `SAMPLE n` clause, you don’t know which relative percent of data was processed. So you don’t know the coefficient the aggregate functions should be multiplied by. Use the `_sample_factor` virtual column to get the approximate result. +When using the `SAMPLE n` clause, you do not know which relative percent of data was processed. So you do not know the coefficient the aggregate functions should be multiplied by. Use the `_sample_factor` virtual column to get the approximate result. The `_sample_factor` column contains relative coefficients that are calculated dynamically. This column is created automatically when you [create](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) a table with the specified sampling key. The usage examples of the `_sample_factor` column are shown below. @@ -79,7 +79,7 @@ FROM visits SAMPLE 10000000 ``` -The example below shows how to calculate the average session duration. Note that you don’t need to use the relative coefficient to calculate the average values. +The example below shows how to calculate the average session duration. Note that you do not need to use the relative coefficient to calculate the average values. ``` sql SELECT avg(Duration) diff --git a/docs/en/sql-reference/statements/select/union.md b/docs/en/sql-reference/statements/select/union.md index cf18ff7a4a2..6cedfb89787 100644 --- a/docs/en/sql-reference/statements/select/union.md +++ b/docs/en/sql-reference/statements/select/union.md @@ -78,4 +78,10 @@ Result: Queries that are parts of `UNION/UNION ALL/UNION DISTINCT` can be run simultaneously, and their results can be mixed together. +**See Also** + +- [insert_null_as_default](../../../operations/settings/settings.md#insert_null_as_default) setting. +- [union_default_mode](../../../operations/settings/settings.md#union-default-mode) setting.
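For example, a short sketch of how the `union_default_mode` setting above affects a bare `UNION` (the literal values are arbitrary):

``` sql
-- A minimal sketch: with union_default_mode = 'ALL', a bare UNION
-- behaves as UNION ALL and keeps duplicate rows; with 'DISTINCT'
-- it would deduplicate them.
SET union_default_mode = 'ALL';
SELECT 1 UNION SELECT 1; -- two rows under this setting
```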
+ + [Original article](https://clickhouse.tech/docs/en/sql-reference/statements/select/union/) diff --git a/docs/en/sql-reference/statements/select/with.md b/docs/en/sql-reference/statements/select/with.md index 6a0564a8ede..0958f651847 100644 --- a/docs/en/sql-reference/statements/select/with.md +++ b/docs/en/sql-reference/statements/select/with.md @@ -4,7 +4,7 @@ toc_title: WITH # WITH Clause {#with-clause} -Clickhouse supports Common Table Expressions ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)), that is provides to use results of `WITH` clause in the rest of `SELECT` query. Named subqueries can be included to the current and child query context in places where table objects are allowed. Recursion is prevented by hiding the current level CTEs from the WITH expression. +ClickHouse supports Common Table Expressions ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)), that is, it allows you to use the results of a `WITH` clause in the rest of a `SELECT` query. Named subqueries can be included in the current and child query context in places where table objects are allowed. Recursion is prevented by hiding the current level CTEs from the `WITH` expression. ## Syntax diff --git a/docs/en/sql-reference/statements/show.md b/docs/en/sql-reference/statements/show.md index 7b3f709b876..a78ef38241f 100644 --- a/docs/en/sql-reference/statements/show.md +++ b/docs/en/sql-reference/statements/show.md @@ -240,7 +240,7 @@ If user is not specified, the query returns privileges for the current user. Shows parameters that were used at a [user creation](../../sql-reference/statements/create/user.md). -`SHOW CREATE USER` doesn’t output user passwords. +`SHOW CREATE USER` does not output user passwords. ### Syntax {#show-create-user-syntax} diff --git a/docs/en/sql-reference/statements/system.md b/docs/en/sql-reference/statements/system.md index 725024efe0c..9397d7002fd 100644 --- a/docs/en/sql-reference/statements/system.md +++ b/docs/en/sql-reference/statements/system.md @@ -169,7 +169,7 @@ SYSTEM START MERGES [ON VOLUME | [db.]merge_tree_family_table_name ### STOP TTL MERGES {#query_language-stop-ttl-merges} Provides possibility to stop background delete old data according to [TTL expression](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl) for tables in the MergeTree family: -Return `Ok.` even table doesn’t exists or table have not MergeTree engine. Return error when database doesn’t exists: +Returns `Ok.` even if the table does not exist or the table does not have a MergeTree engine. Returns error when database does not exist: ``` sql SYSTEM STOP TTL MERGES [[db.]merge_tree_family_table_name] ``` @@ -178,7 +178,7 @@ SYSTEM STOP TTL MERGES [[db.]merge_tree_family_table_name] ### START TTL MERGES {#query_language-start-ttl-merges} Provides possibility to start background delete old data according to [TTL expression](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl) for tables in the MergeTree family: -Return `Ok.` even table doesn’t exists. Return error when database doesn’t exists: +Returns `Ok.` even if the table does not exist.
Returns error when database does not exist: ``` sql SYSTEM START TTL MERGES [[db.]merge_tree_family_table_name] ``` @@ -187,7 +187,7 @@ SYSTEM START TTL MERGES [[db.]merge_tree_family_table_name] ### STOP MOVES {#query_language-stop-moves} Provides possibility to stop background move data according to [TTL table expression with TO VOLUME or TO DISK clause](../../engines/table-engines/mergetree-family/mergetree.md#mergetree-table-ttl) for tables in the MergeTree family: -Return `Ok.` even table doesn’t exists. Return error when database doesn’t exists: +Returns `Ok.` even if the table does not exist. Returns error when database does not exist: ``` sql SYSTEM STOP MOVES [[db.]merge_tree_family_table_name] ``` @@ -196,10 +196,10 @@ SYSTEM STOP MOVES [[db.]merge_tree_family_table_name] ### START MOVES {#query_language-start-moves} Provides possibility to start background move data according to [TTL table expression with TO VOLUME and TO DISK clause](../../engines/table-engines/mergetree-family/mergetree.md#mergetree-table-ttl) for tables in the MergeTree family: -Return `Ok.` even table doesn’t exists. Return error when database doesn’t exists: +Returns `Ok.` even if the table does not exist. Returns error when database does not exist: ``` sql -SYSTEM STOP MOVES [[db.]merge_tree_family_table_name] +SYSTEM START MOVES [[db.]merge_tree_family_table_name] ``` ## Managing ReplicatedMergeTree Tables {#query-language-system-replicated} ClickHouse can manage background replication related processes in [ReplicatedMer ### STOP FETCHES {#query_language-system-stop-fetches} Provides possibility to stop background fetches for inserted parts for tables in the `ReplicatedMergeTree` family: -Always returns `Ok.` regardless of the table engine and even table or database doesn’t exists. +Always returns `Ok.` regardless of the table engine and even if the table or database does not exist. ``` sql SYSTEM STOP FETCHES [[db.]replicated_merge_tree_family_table_name] ``` @@ -218,7 +218,7 @@ SYSTEM STOP FETCHES [[db.]replicated_merge_tree_family_table_name] ### START FETCHES {#query_language-system-start-fetches} Provides possibility to start background fetches for inserted parts for tables in the `ReplicatedMergeTree` family: -Always returns `Ok.` regardless of the table engine and even table or database doesn’t exists. +Always returns `Ok.` regardless of the table engine and even if the table or database does not exist. ``` sql SYSTEM START FETCHES [[db.]replicated_merge_tree_family_table_name] ``` @@ -264,6 +264,8 @@ Wait until a `ReplicatedMergeTree` table will be synced with other replicas in a SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name ``` +After running this statement, the `[db.]replicated_merge_tree_family_table_name` table fetches commands from the common replicated log into its own replication queue, and then the query waits until the replica processes all of the fetched commands.
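For example, a minimal invocation with hypothetical database and table names; the statement blocks until the local replica has processed its queue:

``` sql
-- Hypothetical names. Returns only after the replica has fetched the
-- outstanding log entries and processed its replication queue.
SYSTEM SYNC REPLICA db.my_replicated_table;
```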
+ ### RESTART REPLICA {#query_language-system-restart-replica} Provides possibility to reinitialize Zookeeper sessions state for `ReplicatedMergeTree` table, will compare current state with Zookeeper as source of truth and add tasks to Zookeeper queue if needed @@ -276,4 +278,3 @@ SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name ### RESTART REPLICAS {#query_language-system-restart-replicas} Provides possibility to reinitialize Zookeeper sessions state for all `ReplicatedMergeTree` tables, will compare current state with Zookeeper as source of truth and add tasks to Zookeeper queue if needed - diff --git a/docs/en/sql-reference/statements/truncate.md b/docs/en/sql-reference/statements/truncate.md index a13936c88ab..f302a8605e2 100644 --- a/docs/en/sql-reference/statements/truncate.md +++ b/docs/en/sql-reference/statements/truncate.md @@ -11,4 +11,4 @@ TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] Removes all data from a table. When the clause `IF EXISTS` is omitted, the query returns an error if the table does not exist. -The `TRUNCATE` query is not supported for [View](../../engines/table-engines/special/view.md), [File](../../engines/table-engines/special/file.md), [URL](../../engines/table-engines/special/url.md) and [Null](../../engines/table-engines/special/null.md) table engines. +The `TRUNCATE` query is not supported for [View](../../engines/table-engines/special/view.md), [File](../../engines/table-engines/special/file.md), [URL](../../engines/table-engines/special/url.md), [Buffer](../../engines/table-engines/special/buffer.md) and [Null](../../engines/table-engines/special/null.md) table engines. diff --git a/docs/en/sql-reference/syntax.md b/docs/en/sql-reference/syntax.md index 5d0eee76393..573e35d2f71 100644 --- a/docs/en/sql-reference/syntax.md +++ b/docs/en/sql-reference/syntax.md @@ -171,7 +171,7 @@ Received exception from server (version 18.14.17): Code: 184. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: Aggregate function sum(b) is found inside another aggregate function in query. ``` -In this example, we declared table `t` with column `b`. Then, when selecting data, we defined the `sum(b) AS b` alias. As aliases are global, ClickHouse substituted the literal `b` in the expression `argMax(a, b)` with the expression `sum(b)`. This substitution caused the exception. +In this example, we declared table `t` with column `b`. Then, when selecting data, we defined the `sum(b) AS b` alias. As aliases are global, ClickHouse substituted the literal `b` in the expression `argMax(a, b)` with the expression `sum(b)`. This substitution caused the exception. You can change this default behavior by setting [prefer_column_name_to_alias](../operations/settings/settings.md#prefer_column_name_to_alias) to `1`. ## Asterisk {#asterisk} diff --git a/docs/en/sql-reference/table-functions/cluster.md b/docs/en/sql-reference/table-functions/cluster.md index b85542d784f..2856e66db9b 100644 --- a/docs/en/sql-reference/table-functions/cluster.md +++ b/docs/en/sql-reference/table-functions/cluster.md @@ -24,7 +24,7 @@ clusterAllReplicas('cluster_name', db, table[, sharding_key]) `sharding_key` - When insert into cluster function with more than one shard, sharding_key need to be provided. -Using the `cluster` and `clusterAllReplicas` table functions are less efficient than creating a `Distributed` table because in this case, the server connection is re-established for every request.
When processing a large number of queries, please always create the `Distributed` table ahead of time, and don’t use the `cluster` and `clusterAllReplicas` table functions. +Using the `cluster` and `clusterAllReplicas` table functions is less efficient than creating a `Distributed` table because in this case, the server connection is re-established for every request. When processing a large number of queries, please always create the `Distributed` table ahead of time, and do not use the `cluster` and `clusterAllReplicas` table functions. The `cluster` and `clusterAllReplicas` table functions can be useful in the following cases: diff --git a/docs/en/sql-reference/table-functions/index.md b/docs/en/sql-reference/table-functions/index.md index fef30c04c9d..d65a18ab985 100644 --- a/docs/en/sql-reference/table-functions/index.md +++ b/docs/en/sql-reference/table-functions/index.md @@ -21,16 +21,18 @@ You can use table functions in: !!! warning "Warning" You can’t use table functions if the [allow_ddl](../../operations/settings/permissions-for-queries.md#settings_allow_ddl) setting is disabled. -| Function | Description | -|-----------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------| -| [file](../../sql-reference/table-functions/file.md) | Creates a File-engine table. | -| [merge](../../sql-reference/table-functions/merge.md) | Creates a Merge-engine table. | -| [numbers](../../sql-reference/table-functions/numbers.md) | Creates a table with a single column filled with integer numbers. | -| [remote](../../sql-reference/table-functions/remote.md) | Allows you to access remote servers without creating a Distributed-engine table. | -| [url](../../sql-reference/table-functions/url.md) | Creates a URL-engine table. | -| [mysql](../../sql-reference/table-functions/mysql.md) | Creates a MySQL-engine table. | -| [postgresql](../../sql-reference/table-functions/postgresql.md) | Creates a PostgreSQL-engine table. | -| [jdbc](../../sql-reference/table-functions/jdbc.md) | Creates a JDBC-engine table. | -| [odbc](../../sql-reference/table-functions/odbc.md) | Creates a ODBC-engine table. | -| [hdfs](../../sql-reference/table-functions/hdfs.md) | Creates a HDFS-engine table. | -| [s3](../../sql-reference/table-functions/s3.md) | Creates a S3-engine table. | +| Function | Description | +|------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------| +| [file](../../sql-reference/table-functions/file.md) | Creates a [File](../../engines/table-engines/special/file.md)-engine table. | +| [merge](../../sql-reference/table-functions/merge.md) | Creates a [Merge](../../engines/table-engines/special/merge.md)-engine table. | +| [numbers](../../sql-reference/table-functions/numbers.md) | Creates a table with a single column filled with integer numbers. | +| [remote](../../sql-reference/table-functions/remote.md) | Allows you to access remote servers without creating a [Distributed](../../engines/table-engines/special/distributed.md)-engine table. | +| [url](../../sql-reference/table-functions/url.md) | Creates a [URL](../../engines/table-engines/special/url.md)-engine table. | +| [mysql](../../sql-reference/table-functions/mysql.md) | Creates a [MySQL](../../engines/table-engines/integrations/mysql.md)-engine table.
| +| [postgresql](../../sql-reference/table-functions/postgresql.md) | Creates a [PostgreSQL](../../engines/table-engines/integrations/postgresql.md)-engine table. | +| [jdbc](../../sql-reference/table-functions/jdbc.md) | Creates a [JDBC](../../engines/table-engines/integrations/jdbc.md)-engine table. | +| [odbc](../../sql-reference/table-functions/odbc.md) | Creates an [ODBC](../../engines/table-engines/integrations/odbc.md)-engine table. | +| [hdfs](../../sql-reference/table-functions/hdfs.md) | Creates an [HDFS](../../engines/table-engines/integrations/hdfs.md)-engine table. | +| [s3](../../sql-reference/table-functions/s3.md) | Creates an [S3](../../engines/table-engines/integrations/s3.md)-engine table. | + +[Original article](https://clickhouse.tech/docs/en/sql-reference/table-functions/) diff --git a/docs/en/sql-reference/table-functions/postgresql.md b/docs/en/sql-reference/table-functions/postgresql.md index ad5d8a29904..3eab572ac12 100644 --- a/docs/en/sql-reference/table-functions/postgresql.md +++ b/docs/en/sql-reference/table-functions/postgresql.md @@ -10,33 +10,17 @@ Allows `SELECT` and `INSERT` queries to be performed on data that is stored on a **Syntax** ``` sql -postgresql('host:port', 'database', 'table', 'user', 'password') +postgresql('host:port', 'database', 'table', 'user', 'password'[, `schema`]) ``` **Arguments** - `host:port` — PostgreSQL server address. - - `database` — Remote database name. - - `table` — Remote table name. - - `user` — PostgreSQL user. - - `password` — User password. - - -SELECT Queries on PostgreSQL side run as `COPY (SELECT ...) TO STDOUT` inside read-only PostgreSQL transaction with commit after each `SELECT` query. - -Simple `WHERE` clauses such as `=, !=, >, >=, <, <=, IN` are executed on the PostgreSQL server. - -All joins, aggregations, sorting, `IN [ array ]` conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to PostgreSQL finishes. - -INSERT Queries on PostgreSQL side run as `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` inside PostgreSQL transaction with auto-commit after each `INSERT` statement. - -PostgreSQL Array types converts into ClickHouse arrays. - -Be careful in PostgreSQL an array data type column like Integer[] may contain arrays of different dimensions in different rows, but in ClickHouse it is only allowed to have multidimensional arrays of the same dimension in all rows. +- `schema` — Non-default table schema. Optional. **Returned Value** @@ -45,6 +29,23 @@ A table object with the same columns as the original PostgreSQL table. !!! info "Note" In the `INSERT` query to distinguish table function `postgresql(...)` from table name with column names list you must use keywords `FUNCTION` or `TABLE FUNCTION`. See examples below. +## Implementation Details {#implementation-details} + +`SELECT` queries on PostgreSQL side run as `COPY (SELECT ...) TO STDOUT` inside read-only PostgreSQL transaction with commit after each `SELECT` query. + +Simple `WHERE` clauses such as `=`, `!=`, `>`, `>=`, `<`, `<=`, and `IN` are executed on the PostgreSQL server. + +All joins, aggregations, sorting, `IN [ array ]` conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to PostgreSQL finishes. + +`INSERT` queries on PostgreSQL side run as `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` inside PostgreSQL transaction with auto-commit after each `INSERT` statement. + +PostgreSQL Array types are converted into ClickHouse arrays. + +!!!
info "Note" + Be careful, in PostgreSQL an array data type column like Integer[] may contain arrays of different dimensions in different rows, but in ClickHouse it is only allowed to have multidimensional arrays of the same dimension in all rows. + +Replica priority for the PostgreSQL dictionary source is supported. The bigger the number in the map, the lower the priority. The highest priority is `0`. + **Examples** Table in PostgreSQL: ``` text postgres=# CREATE TABLE "public"."test" ( "int_id" SERIAL, "int_nullable" INT NULL DEFAULT NULL, "float" FLOAT NOT NULL, "str" VARCHAR(100) NOT NULL DEFAULT '', "float_nullable" FLOAT NULL DEFAULT NULL, PRIMARY KEY (int_id)); CREATE TABLE -postgres=# insert into test (int_id, str, "float") VALUES (1,'test',2); +postgres=# INSERT INTO test (int_id, str, "float") VALUES (1,'test',2); INSERT 0 1 -postgresql> select * from test; - int_id | int_nullable | float | str | float_nullable ---------+--------------+-------+------+---------------- - 1 | | 2 | test | +postgresql> SELECT * FROM test; + int_id | int_nullable | float | str | float_nullable + --------+--------------+-------+------+---------------- + 1 | | 2 | test | (1 row) ``` @@ -96,9 +97,24 @@ SELECT * FROM postgresql('localhost:5432', 'test', 'test', 'postgresql_user', 'p └────────┴──────────────┴───────┴──────┴────────────────┘ ``` +Using Non-default Schema: + +```text +postgres=# CREATE SCHEMA "nice.schema"; + +postgres=# CREATE TABLE "nice.schema"."nice.table" (a integer); + +postgres=# INSERT INTO "nice.schema"."nice.table" SELECT i FROM generate_series(0, 99) as t(i) +``` + +```sql +CREATE TABLE pg_table_schema_with_dots (a UInt32) + ENGINE PostgreSQL('localhost:5432', 'clickhouse', 'nice.table', 'postgresql_user', 'password', 'nice.schema'); +``` + **See Also** -- [The ‘PostgreSQL’ table engine](../../engines/table-engines/integrations/postgresql.md) +- [The PostgreSQL table engine](../../engines/table-engines/integrations/postgresql.md) - [Using PostgreSQL as a source of external dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql) [Original article](https://clickhouse.tech/docs/en/sql-reference/table-functions/postgresql/) diff --git a/docs/en/sql-reference/table-functions/remote.md b/docs/en/sql-reference/table-functions/remote.md index e80e58a76aa..ae399c7e612 100644 --- a/docs/en/sql-reference/table-functions/remote.md +++ b/docs/en/sql-reference/table-functions/remote.md @@ -42,7 +42,7 @@ The dataset from remote servers. **Usage** -Using the `remote` table function is less optimal than creating a `Distributed` table because in this case the server connection is re-established for every request. Also, if hostnames are set, the names are resolved, and errors are not counted when working with various replicas. When processing a large number of queries, always create the `Distributed` table ahead of time, and don’t use the `remote` table function. +Using the `remote` table function is less optimal than creating a `Distributed` table because in this case the server connection is re-established for every request. Also, if hostnames are set, the names are resolved, and errors are not counted when working with various replicas. When processing a large number of queries, always create the `Distributed` table ahead of time, and do not use the `remote` table function.
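For instance, a sketch of a one-off ad-hoc query for which creating a `Distributed` table would not pay off (the host pattern and table name are hypothetical):

``` sql
-- Hypothetical shard addresses and table: fine for a one-time query,
-- since the connection is re-established on every call anyway.
SELECT count() FROM remote('example01-0{1,2}-1', db.hits);
```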
The `remote` table function can be useful in the following cases: diff --git a/docs/en/sql-reference/table-functions/s3.md b/docs/en/sql-reference/table-functions/s3.md index 34f0607b94c..285ec862aab 100644 --- a/docs/en/sql-reference/table-functions/s3.md +++ b/docs/en/sql-reference/table-functions/s3.md @@ -18,7 +18,7 @@ s3(path, [aws_access_key_id, aws_secret_access_key,] format, structure, [compres - `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [here](../../engines/table-engines/integrations/s3.md#wildcards-in-path). - `format` — The [format](../../interfaces/formats.md#formats) of the file. - `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`. -- `compression` — Parameter is optional. Supported values: none, gzip/gz, brotli/br, xz/LZMA, zstd/zst. By default, it will autodetect compression by file extension. +- `compression` — Parameter is optional. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. By default, it will autodetect compression by file extension. **Returned value** diff --git a/docs/en/sql-reference/table-functions/view.md b/docs/en/sql-reference/table-functions/view.md index e49a9f5218b..18323ec4e92 100644 --- a/docs/en/sql-reference/table-functions/view.md +++ b/docs/en/sql-reference/table-functions/view.md @@ -5,7 +5,7 @@ toc_title: view ## view {#view} -Turns a subquery into a table. The function implements views (see [CREATE VIEW](https://clickhouse.tech/docs/en/sql-reference/statements/create/view/#create-view)). The resulting table doesn't store data, but only stores the specified `SELECT` query. When reading from the table, ClickHouse executes the query and deletes all unnecessary columns from the result. +Turns a subquery into a table. The function implements views (see [CREATE VIEW](https://clickhouse.tech/docs/en/sql-reference/statements/create/view/#create-view)). The resulting table does not store data, but only stores the specified `SELECT` query. When reading from the table, ClickHouse executes the query and deletes all unnecessary columns from the result. **Syntax** diff --git a/docs/en/whats-new/changelog/2017.md b/docs/en/whats-new/changelog/2017.md index 17d3efe7bab..9ceca2b6c4a 100644 --- a/docs/en/whats-new/changelog/2017.md +++ b/docs/en/whats-new/changelog/2017.md @@ -7,7 +7,7 @@ toc_title: '2017' This release contains bug fixes for the previous release 1.1.54318: -- Fixed bug with possible race condition in replication that could lead to data loss. This issue affects versions 1.1.54310 and 1.1.54318. If you use one of these versions with Replicated tables, the update is strongly recommended. This issue shows in logs in Warning messages like `Part ... from own log doesn't exist.` The issue is relevant even if you don’t see these messages in logs. +- Fixed bug with possible race condition in replication that could lead to data loss. This issue affects versions 1.1.54310 and 1.1.54318. If you use one of these versions with Replicated tables, the update is strongly recommended. This issue shows in logs in Warning messages like `Part ... from own log does not exist.` The issue is relevant even if you do not see these messages in logs. 
### ClickHouse Release 1.1.54318, 2017-11-30 {#clickhouse-release-1-1-54318-2017-11-30} @@ -50,7 +50,7 @@ This release contains bug fixes for the previous release 1.1.54310: - Fixed nonatomic adding and removing of parts in Replicated tables. - Data inserted into a materialized view is not subjected to unnecessary deduplication. - Executing a query to a Distributed table for which the local replica is lagging and remote replicas are unavailable does not result in an error anymore. -- Users don’t need access permissions to the `default` database to create temporary tables anymore. +- Users do not need access permissions to the `default` database to create temporary tables anymore. - Fixed crashing when specifying the Array type without arguments. - Fixed hangups when the disk volume containing server logs is full. - Fixed an overflow in the toRelativeWeekNum function for the first week of the Unix epoch. @@ -138,7 +138,7 @@ This release contains bug fixes for the previous release 1.1.54310: #### Please Note When Upgrading: {#please-note-when-upgrading} -- There is now a higher default value for the MergeTree setting `max_bytes_to_merge_at_max_space_in_pool` (the maximum total size of data parts to merge, in bytes): it has increased from 100 GiB to 150 GiB. This might result in large merges running after the server upgrade, which could cause an increased load on the disk subsystem. If the free space available on the server is less than twice the total amount of the merges that are running, this will cause all other merges to stop running, including merges of small data parts. As a result, INSERT queries will fail with the message “Merges are processing significantly slower than inserts.” Use the `SELECT * FROM system.merges` query to monitor the situation. You can also check the `DiskSpaceReservedForMerge` metric in the `system.metrics` table, or in Graphite. You don’t need to do anything to fix this, since the issue will resolve itself once the large merges finish. If you find this unacceptable, you can restore the previous value for the `max_bytes_to_merge_at_max_space_in_pool` setting. To do this, go to the `<merge_tree>` section in config.xml, set `<max_bytes_to_merge_at_max_space_in_pool>107374182400</max_bytes_to_merge_at_max_space_in_pool>` and restart the server. +- There is now a higher default value for the MergeTree setting `max_bytes_to_merge_at_max_space_in_pool` (the maximum total size of data parts to merge, in bytes): it has increased from 100 GiB to 150 GiB. This might result in large merges running after the server upgrade, which could cause an increased load on the disk subsystem. If the free space available on the server is less than twice the total amount of the merges that are running, this will cause all other merges to stop running, including merges of small data parts. As a result, INSERT queries will fail with the message “Merges are processing significantly slower than inserts.” Use the `SELECT * FROM system.merges` query to monitor the situation. You can also check the `DiskSpaceReservedForMerge` metric in the `system.metrics` table, or in Graphite. You do not need to do anything to fix this, since the issue will resolve itself once the large merges finish. If you find this unacceptable, you can restore the previous value for the `max_bytes_to_merge_at_max_space_in_pool` setting. To do this, go to the `<merge_tree>` section in config.xml, set `<max_bytes_to_merge_at_max_space_in_pool>107374182400</max_bytes_to_merge_at_max_space_in_pool>` and restart the server.
### ClickHouse Release 1.1.54284, 2017-08-29 {#clickhouse-release-1-1-54284-2017-08-29} @@ -181,7 +181,7 @@ This release contains bug fixes for the previous release 1.1.54276: - Added the `output_format_json_quote_denormals` setting, which enables outputting nan and inf values in JSON format. - Optimized stream allocation when reading from a Distributed table. -- Settings can be configured in readonly mode if the value doesn’t change. +- Settings can be configured in readonly mode if the value does not change. - Added the ability to retrieve non-integer granules of the MergeTree engine in order to meet restrictions on the block size specified in the preferred_block_size_bytes setting. The purpose is to reduce the consumption of RAM and increase cache locality when processing queries from tables with large columns. - Efficient use of indexes that contain expressions like `toStartOfHour(x)` for conditions like `toStartOfHour(x) op сonstexpr.` - Added new settings for MergeTree engines (the merge_tree section in config.xml): diff --git a/docs/en/whats-new/changelog/2018.md b/docs/en/whats-new/changelog/2018.md index b0c4e147352..3544c9a9b49 100644 --- a/docs/en/whats-new/changelog/2018.md +++ b/docs/en/whats-new/changelog/2018.md @@ -32,7 +32,7 @@ toc_title: '2018' - Now you can use a parameter to configure the precision of the `uniqCombined` aggregate function (select the number of HyperLogLog cells). [#3406](https://github.com/ClickHouse/ClickHouse/pull/3406) - Added the `system.contributors` table that contains the names of everyone who made commits in ClickHouse. [#3452](https://github.com/ClickHouse/ClickHouse/pull/3452) - Added the ability to omit the partition for the `ALTER TABLE ... FREEZE` query in order to back up all partitions at once. [#3514](https://github.com/ClickHouse/ClickHouse/pull/3514) -- Added `dictGet` and `dictGetOrDefault` functions that don’t require specifying the type of return value. The type is determined automatically from the dictionary description. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3564) +- Added `dictGet` and `dictGetOrDefault` functions that do not require specifying the type of return value. The type is determined automatically from the dictionary description. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3564) - Now you can specify comments for a column in the table description and change it using `ALTER`. [#3377](https://github.com/ClickHouse/ClickHouse/pull/3377) - Reading is supported for `Join` type tables with simple keys. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728) - Now you can specify the options `join_use_nulls`, `max_rows_in_join`, `max_bytes_in_join`, and `join_overflow_mode` when creating a `Join` type table. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728) @@ -70,7 +70,7 @@ toc_title: '2018' #### Improvements: {#improvements-1} -- The server does not write the processed configuration files to the `/etc/clickhouse-server/` directory. Instead, it saves them in the `preprocessed_configs` directory inside `path`. This means that the `/etc/clickhouse-server/` directory doesn’t have write access for the `clickhouse` user, which improves security. [#2443](https://github.com/ClickHouse/ClickHouse/pull/2443) +- The server does not write the processed configuration files to the `/etc/clickhouse-server/` directory. Instead, it saves them in the `preprocessed_configs` directory inside `path`. 
This means that the `/etc/clickhouse-server/` directory does not have write access for the `clickhouse` user, which improves security. [#2443](https://github.com/ClickHouse/ClickHouse/pull/2443) - The `min_merge_bytes_to_use_direct_io` option is set to 10 GiB by default. A merge that forms large parts of tables from the MergeTree family will be performed in `O_DIRECT` mode, which prevents excessive page cache eviction. [#3504](https://github.com/ClickHouse/ClickHouse/pull/3504) - Accelerated server start when there is a very large number of tables. [#3398](https://github.com/ClickHouse/ClickHouse/pull/3398) - Added a connection pool and HTTP `Keep-Alive` for connections between replicas. [#3594](https://github.com/ClickHouse/ClickHouse/pull/3594) @@ -291,7 +291,7 @@ toc_title: '2018' - Fixed an error when using `FINAL` with `PREWHERE`. [#3298](https://github.com/ClickHouse/ClickHouse/pull/3298) - Fixed an error when using `PREWHERE` over columns that were added during `ALTER`. [#3298](https://github.com/ClickHouse/ClickHouse/pull/3298) - Added a check for the absence of `arrayJoin` for `DEFAULT` and `MATERIALIZED` expressions. Previously, `arrayJoin` led to an error when inserting data. [#3337](https://github.com/ClickHouse/ClickHouse/pull/3337) -- Added a check for the absence of `arrayJoin` in a `PREWHERE` clause. Previously, this led to messages like `Size ... doesn't match` or `Unknown compression method` when executing queries. [#3357](https://github.com/ClickHouse/ClickHouse/pull/3357) +- Added a check for the absence of `arrayJoin` in a `PREWHERE` clause. Previously, this led to messages like `Size ... does not match` or `Unknown compression method` when executing queries. [#3357](https://github.com/ClickHouse/ClickHouse/pull/3357) - Fixed segfault that could occur in rare cases after optimization that replaced AND chains from equality evaluations with the corresponding IN expression. [liuyimin-bytedance](https://github.com/ClickHouse/ClickHouse/pull/3339) - Minor corrections to `clickhouse-benchmark`: previously, client information was not sent to the server; now the number of queries executed is calculated more accurately when shutting down and for limiting the number of iterations. [#3351](https://github.com/ClickHouse/ClickHouse/pull/3351) [#3352](https://github.com/ClickHouse/ClickHouse/pull/3352) @@ -392,7 +392,7 @@ toc_title: '2018' - The operation timeout can now be configured when working with ZooKeeper. [urykhy](https://github.com/ClickHouse/ClickHouse/pull/2971) - You can specify an offset for `LIMIT n, m` as `LIMIT n OFFSET m`. [#2840](https://github.com/ClickHouse/ClickHouse/pull/2840) - You can use the `SELECT TOP n` syntax as an alternative for `LIMIT`. [#2840](https://github.com/ClickHouse/ClickHouse/pull/2840) -- Increased the size of the queue to write to system tables, so the `SystemLog parameter queue is full` error doesn’t happen as often. +- Increased the size of the queue to write to system tables, so the `SystemLog parameter queue is full` error does not happen as often. - The `windowFunnel` aggregate function now supports events that meet multiple conditions. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2801) - Duplicate columns can be used in a `USING` clause for `JOIN`. [#3006](https://github.com/ClickHouse/ClickHouse/pull/3006) - `Pretty` formats now have a limit on column alignment by width. Use the `output_format_pretty_max_column_pad_width` setting. 
If a value is wider, it will still be displayed in its entirety, but the other cells in the table will not be too wide. [#3003](https://github.com/ClickHouse/ClickHouse/pull/3003) @@ -405,7 +405,7 @@ toc_title: '2018' #### Bug Fixes: {#bug-fixes-13} -- Fixed an issue with `Dictionary` tables (throws the `Size of offsets doesn't match size of column` or `Unknown compression method` exception). This bug appeared in version 18.10.3. [#2913](https://github.com/ClickHouse/ClickHouse/issues/2913) +- Fixed an issue with `Dictionary` tables (throws the `Size of offsets does not match size of column` or `Unknown compression method` exception). This bug appeared in version 18.10.3. [#2913](https://github.com/ClickHouse/ClickHouse/issues/2913) - Fixed a bug when merging `CollapsingMergeTree` tables if one of the data parts is empty (these parts are formed during merge or `ALTER DELETE` if all data was deleted), and the `vertical` algorithm was used for the merge. [#3049](https://github.com/ClickHouse/ClickHouse/pull/3049) - Fixed a race condition during `DROP` or `TRUNCATE` for `Memory` tables with a simultaneous `SELECT`, which could lead to server crashes. This bug appeared in version 1.1.54388. [#3038](https://github.com/ClickHouse/ClickHouse/pull/3038) - Fixed the possibility of data loss when inserting in `Replicated` tables if the `Session is expired` error is returned (data loss can be detected by the `ReplicatedDataLoss` metric). This error occurred in version 1.1.54378. [#2939](https://github.com/ClickHouse/ClickHouse/pull/2939) [#2949](https://github.com/ClickHouse/ClickHouse/pull/2949) [#2964](https://github.com/ClickHouse/ClickHouse/pull/2964) @@ -715,7 +715,7 @@ toc_title: '2018' #### Backward Incompatible Changes: {#backward-incompatible-changes-7} - Removed escaping in `Vertical` and `Pretty*` formats and deleted the `VerticalRaw` format. -- If servers with version 1.1.54388 (or newer) and servers with an older version are used simultaneously in a distributed query and the query has the `cast(x, 'Type')` expression without the `AS` keyword and doesn’t have the word `cast` in uppercase, an exception will be thrown with a message like `Not found column cast(0, 'UInt8') in block`. Solution: Update the server on the entire cluster. +- If servers with version 1.1.54388 (or newer) and servers with an older version are used simultaneously in a distributed query and the query has the `cast(x, 'Type')` expression without the `AS` keyword and does not have the word `cast` in uppercase, an exception will be thrown with a message like `Not found column cast(0, 'UInt8') in block`. Solution: Update the server on the entire cluster. ### ClickHouse Release 1.1.54385, 2018-06-01 {#clickhouse-release-1-1-54385-2018-06-01} @@ -1044,7 +1044,7 @@ This release contains bug fixes for the previous release 1.1.54337: #### Backward Incompatible Changes: {#backward-incompatible-changes-11} -- The format for marks in `Log` type tables that contain `Nullable` columns was changed in a backward incompatible way. If you have these tables, you should convert them to the `TinyLog` type before starting up the new server version. To do this, replace `ENGINE = Log` with `ENGINE = TinyLog` in the corresponding `.sql` file in the `metadata` directory. If your table doesn’t have `Nullable` columns or if the type of your table is not `Log`, then you don’t need to do anything. +- The format for marks in `Log` type tables that contain `Nullable` columns was changed in a backward incompatible way. 
If you have these tables, you should convert them to the `TinyLog` type before starting up the new server version. To do this, replace `ENGINE = Log` with `ENGINE = TinyLog` in the corresponding `.sql` file in the `metadata` directory. If your table does not have `Nullable` columns or if the type of your table is not `Log`, then you do not need to do anything. - Removed the `experimental_allow_extended_storage_definition_syntax` setting. Now this feature is enabled by default. - The `runningIncome` function was renamed to `runningDifferenceStartingWithFirstvalue` to avoid confusion. - Removed the `FROM ARRAY JOIN arr` syntax when ARRAY JOIN is specified directly after FROM with no table (Amos Bird). diff --git a/docs/en/whats-new/changelog/2019.md b/docs/en/whats-new/changelog/2019.md index eacd522390f..bd86bf6ce8b 100644 --- a/docs/en/whats-new/changelog/2019.md +++ b/docs/en/whats-new/changelog/2019.md @@ -11,16 +11,16 @@ toc_title: '2019' - Fixed potential buffer overflow in decompress. Malicious user can pass fabricated compressed data that could cause read after buffer. This issue was found by Eldar Zaitov from Yandex information security team. [#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Fixed possible server crash (`std::terminate`) when the server cannot send or write data in JSON or XML format with values of String data type (that require UTF-8 validation) or when compressing result data with Brotli algorithm or in some other rare cases. [#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed dictionaries with source from a clickhouse `VIEW`, now reading such dictionaries doesn’t cause the error `There is no query`. [#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Fixed dictionaries with source from a clickhouse `VIEW`, now reading such dictionaries does not cause the error `There is no query`. [#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) - Fixed checking if a client host is allowed by host_regexp specified in users.xml. [#8241](https://github.com/ClickHouse/ClickHouse/pull/8241), [#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([Vitaly Baranov](https://github.com/vitlibar)) - `RENAME TABLE` for a distributed table now renames the folder containing inserted data before sending to shards. This fixes an issue with successive renames `tableA->tableB`, `tableC->tableA`. [#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix)) - `range_hashed` external dictionaries created by DDL queries now allow ranges of arbitrary numeric types. [#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([alesapin](https://github.com/alesapin)) - Fixed `INSERT INTO table SELECT ... FROM mysql(...)` table function. [#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix)) -- Fixed segfault in `INSERT INTO TABLE FUNCTION file()` while inserting into a file which doesn’t exist. Now in this case file would be created and then insert would be processed. [#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia)) +- Fixed segfault in `INSERT INTO TABLE FUNCTION file()` while inserting into a file which does not exist. 
Now in this case file would be created and then insert would be processed. [#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia)) - Fixed bitmapAnd error when intersecting an aggregated bitmap and a scalar bitmap. [#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([Yue Huang](https://github.com/moon03432)) - Fixed segfault when `EXISTS` query was used without `TABLE` or `DICTIONARY` qualifier, just like `EXISTS t`. [#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Fixed return type for functions `rand` and `randConstant` in case of nullable argument. Now functions always return `UInt32` and never `Nullable(UInt32)`. [#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Fixed `DROP DICTIONARY IF EXISTS db.dict`, now it doesn’t throw exception if `db` doesn’t exist. [#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([Vitaly Baranov](https://github.com/vitlibar)) +- Fixed `DROP DICTIONARY IF EXISTS db.dict`, now it does not throw an exception if `db` does not exist. [#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([Vitaly Baranov](https://github.com/vitlibar)) - If a table wasn’t completely dropped because of server crash, the server will try to restore and load it [#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix)) - Fixed a trivial count query for a distributed table if there are more than two shards with a local table. [#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu)) - Fixed bug that led to a data race in DB::BlockStreamProfileInfo::calculateRowsBeforeLimit() [#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([Alexander Kazakov](https://github.com/Akazz)) @@ -36,7 +36,7 @@ toc_title: '2019' - Removed the mutation number from a part name in case there were no mutations. This removal improved the compatibility with older versions. [#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([alesapin](https://github.com/alesapin)) - Fixed the bug that mutations are skipped for some attached parts because their data_version is larger than the table mutation version. [#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([Zhichang Yu](https://github.com/yuzhichang)) - Allow starting the server with redundant copies of parts after moving them to another device. [#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon)) -- Fixed the error “Sizes of columns doesn’t match” that might appear when using aggregate function columns. [#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea)) +- Fixed the error “Sizes of columns does not match” that might appear when using aggregate function columns. [#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea)) - Now an exception will be thrown in case of using WITH TIES alongside LIMIT BY. And now it’s possible to use TOP with LIMIT BY. [#7637](https://github.com/ClickHouse/ClickHouse/pull/7637) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) - Fix dictionary reload if it has `invalidate_query`, which stopped updates after an exception on previous update attempts.
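For context, `invalidate_query` is the per-source staleness check in an external dictionary definition; the fix above makes reloads recover after that query throws once. A minimal sketch with hypothetical table and column names:

```sql
CREATE DICTIONARY hypothetical_dict
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 DB 'default' TABLE 'src'
    INVALIDATE_QUERY 'SELECT max(updated_at) FROM default.src'))
LAYOUT(FLAT())
LIFETIME(MIN 300 MAX 600);
-- The dictionary is re-fetched only when the result of the invalidate query changes.
```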
[#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([alesapin](https://github.com/alesapin)) @@ -52,7 +52,7 @@ toc_title: '2019' - Make `bloom_filter` type of index supporting `LowCardinality` and `Nullable` [#7363](https://github.com/ClickHouse/ClickHouse/issues/7363) [#7561](https://github.com/ClickHouse/ClickHouse/pull/7561) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) - Add function `isValidJSON` to check that passed string is a valid json. [#5910](https://github.com/ClickHouse/ClickHouse/issues/5910) [#7293](https://github.com/ClickHouse/ClickHouse/pull/7293) ([Vdimir](https://github.com/Vdimir)) - Implement `arrayCompact` function [#7328](https://github.com/ClickHouse/ClickHouse/pull/7328) ([Memo](https://github.com/Joeywzr)) -- Created function `hex` for Decimal numbers. It works like `hex(reinterpretAsString())`, but doesn’t delete last zero bytes. [#7355](https://github.com/ClickHouse/ClickHouse/pull/7355) ([Mikhail Korotov](https://github.com/millb)) +- Created function `hex` for Decimal numbers. It works like `hex(reinterpretAsString())`, but does not delete last zero bytes. [#7355](https://github.com/ClickHouse/ClickHouse/pull/7355) ([Mikhail Korotov](https://github.com/millb)) - Add `arrayFill` and `arrayReverseFill` functions, which replace elements by other elements in front/back of them in the array. [#7380](https://github.com/ClickHouse/ClickHouse/pull/7380) ([hcz](https://github.com/hczhcz)) - Add `CRC32IEEE()`/`CRC64()` support [#7480](https://github.com/ClickHouse/ClickHouse/pull/7480) ([Azat Khuzhin](https://github.com/azat)) - Implement `char` function similar to one in [mysql](https://dev.mysql.com/doc/refman/8.0/en/string-functions.html#function_char) [#7486](https://github.com/ClickHouse/ClickHouse/pull/7486) ([sundyli](https://github.com/sundy-li)) @@ -609,12 +609,12 @@ toc_title: '2019' - Fixed the possibility of hanging queries when server is overloaded and global thread pool becomes near full. This has a higher chance of happening on clusters with a large number of shards (hundreds), because distributed queries allocate a thread per connection to each shard. For example, this issue may reproduce if a cluster of 330 shards is processing 30 concurrent distributed queries. This issue affects all versions starting from 19.2. [#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Fixed logic of `arrayEnumerateUniqRanked` function. [#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Fix segfault when decoding symbol table. [#6603](https://github.com/ClickHouse/ClickHouse/pull/6603) ([Amos Bird](https://github.com/amosbird)) -- Fixed irrelevant exception in cast of `LowCardinality(Nullable)` to not-Nullable column in case if it doesn’t contain Nulls (e.g. in query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`. [#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Fixed irrelevant exception in cast of `LowCardinality(Nullable)` to a not-Nullable column in case it does not contain Nulls (e.g. in a query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`).
[#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) - Removed extra quoting of description in `system.settings` table. [#6696](https://github.com/ClickHouse/ClickHouse/issues/6696) [#6699](https://github.com/ClickHouse/ClickHouse/pull/6699) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Avoid possible deadlock in `TRUNCATE` of Replicated table. [#6695](https://github.com/ClickHouse/ClickHouse/pull/6695) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Fix reading in order of sorting key. [#6189](https://github.com/ClickHouse/ClickHouse/pull/6189) ([Anton Popov](https://github.com/CurtizJ)) - Fix `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin)) -- Fix bug opened by [#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) (since 19.4.0). Reproduces in queries to Distributed tables over MergeTree tables when we doesn’t query any columns (`SELECT 1`). [#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin)) +- Fix bug opened by [#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) (since 19.4.0). Reproduces in queries to Distributed tables over MergeTree tables when we do not query any columns (`SELECT 1`). [#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin)) - Fixed overflow in integer division of signed type to unsigned type. The behaviour was exactly as in C or C++ language (integer promotion rules) that may be surprising. Please note that the overflow is still possible when dividing large signed number to large unsigned number or vice-versa (but that case is less usual). The issue existed in all server versions. [#6214](https://github.com/ClickHouse/ClickHouse/issues/6214) [#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Limit maximum sleep time for throttling when `max_execution_speed` or `max_execution_speed_bytes` is set. Fixed false errors like `Estimated query execution time (inf seconds) is too long`. [#5547](https://github.com/ClickHouse/ClickHouse/issues/5547) [#6232](https://github.com/ClickHouse/ClickHouse/pull/6232) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Fixed issues about using `MATERIALIZED` columns and aliases in `MaterializedView`. [#448](https://github.com/ClickHouse/ClickHouse/issues/448) [#3484](https://github.com/ClickHouse/ClickHouse/issues/3484) [#3450](https://github.com/ClickHouse/ClickHouse/issues/3450) [#2878](https://github.com/ClickHouse/ClickHouse/issues/2878) [#2285](https://github.com/ClickHouse/ClickHouse/issues/2285) [#3796](https://github.com/ClickHouse/ClickHouse/pull/3796) ([Amos Bird](https://github.com/amosbird)) [#6316](https://github.com/ClickHouse/ClickHouse/pull/6316) ([alexey-milovidov](https://github.com/alexey-milovidov)) @@ -639,7 +639,7 @@ toc_title: '2019' - Allow to `ATTACH` live views (for example, at the server startup) regardless of the `allow_experimental_live_view` setting. [#6754](https://github.com/ClickHouse/ClickHouse/pull/6754) ([alexey-milovidov](https://github.com/alexey-milovidov)) - For stack traces gathered by query profiler, do not include stack frames generated by the query profiler itself.
[#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Now table functions `values`, `file`, `url`, `hdfs` have support for ALIAS columns. [#6255](https://github.com/ClickHouse/ClickHouse/pull/6255) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Throw an exception if `config.d` file doesn’t have the corresponding root element as the config file. [#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000)) +- Throw an exception if `config.d` file does not have the corresponding root element as the config file. [#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000)) - Print extra info in exception message for `no space left on device`. [#6182](https://github.com/ClickHouse/ClickHouse/issues/6182), [#6252](https://github.com/ClickHouse/ClickHouse/issues/6252) [#6352](https://github.com/ClickHouse/ClickHouse/pull/6352) ([tavplubix](https://github.com/tavplubix)) - When determining shards of a `Distributed` table to be covered by a read query (for `optimize_skip_unused_shards` = 1) ClickHouse now checks conditions from both `prewhere` and `where` clauses of select statement. [#6521](https://github.com/ClickHouse/ClickHouse/pull/6521) ([Alexander Kazakov](https://github.com/Akazz)) - Enabled `SIMDJSON` for machines without AVX2 but with SSE 4.2 and PCLMUL instruction set. [#6285](https://github.com/ClickHouse/ClickHouse/issues/6285) [#6320](https://github.com/ClickHouse/ClickHouse/pull/6320) ([alexey-milovidov](https://github.com/alexey-milovidov)) @@ -657,7 +657,7 @@ toc_title: '2019' - Fixed possible deadlock of distributed queries when one of shards is localhost but the query is sent via network connection. [#6759](https://github.com/ClickHouse/ClickHouse/pull/6759) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Changed semantic of multiple tables `RENAME` to avoid possible deadlocks. [#6757](https://github.com/ClickHouse/ClickHouse/issues/6757). [#6756](https://github.com/ClickHouse/ClickHouse/pull/6756) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Rewritten MySQL compatibility server to prevent loading full packet payload in memory. Decreased memory consumption for each connection to approximately `2 * DBMS_DEFAULT_BUFFER_SIZE` (read/write buffers). [#5811](https://github.com/ClickHouse/ClickHouse/pull/5811) ([Yuriy Baranov](https://github.com/yurriy)) -- Move AST alias interpreting logic out of parser that doesn’t have to know anything about query semantics. [#6108](https://github.com/ClickHouse/ClickHouse/pull/6108) ([Artem Zuikov](https://github.com/4ertus2)) +- Move AST alias interpreting logic out of parser that does not have to know anything about query semantics. [#6108](https://github.com/ClickHouse/ClickHouse/pull/6108) ([Artem Zuikov](https://github.com/4ertus2)) - Slightly more safe parsing of `NamesAndTypesList`. [#6408](https://github.com/ClickHouse/ClickHouse/issues/6408). [#6410](https://github.com/ClickHouse/ClickHouse/pull/6410) ([alexey-milovidov](https://github.com/alexey-milovidov)) - `clickhouse-copier`: Allow use `where_condition` from config with `partition_key` alias in query for checking partition existence (Earlier it was used only in reading data queries). [#6577](https://github.com/ClickHouse/ClickHouse/pull/6577) ([proller](https://github.com/proller)) - Added optional message argument in `throwIf`. 
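A quick illustration of the optional message argument mentioned in the `throwIf` entry above (a sketch; the second argument supplies the exception text):

```sql
-- Raises an exception with the supplied message as soon as the condition is non-zero.
SELECT throwIf(number = 3, 'Custom message: number 3 is not allowed') FROM numbers(10);

-- Without the second argument, a generic error text is used.
SELECT throwIf(1);
```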
([#5772](https://github.com/ClickHouse/ClickHouse/issues/5772)) [#6329](https://github.com/ClickHouse/ClickHouse/pull/6329) ([Vdimir](https://github.com/Vdimir)) @@ -869,7 +869,7 @@ toc_title: '2019' #### Improvement {#improvement-4} -- Throws an exception if `config.d` file doesn’t have the corresponding root element as the config file [#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000)) +- Throws an exception if `config.d` file does not have the corresponding root element as the config file [#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000)) #### Performance Improvement {#performance-improvement-3} @@ -986,7 +986,7 @@ toc_title: '2019' - Fix segfault in ExternalLoader::reloadOutdated(). [#6082](https://github.com/ClickHouse/ClickHouse/pull/6082) ([Vitaly Baranov](https://github.com/vitlibar)) - Fixed the case when server may close listening sockets but not shutdown and continue serving remaining queries. You may end up with two running clickhouse-server processes. Sometimes, the server may return an error `bad_function_call` for remaining queries. [#6231](https://github.com/ClickHouse/ClickHouse/pull/6231) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Fixed useless and incorrect condition on update field for initial loading of external dictionaries via ODBC, MySQL, ClickHouse and HTTP. This fixes [#6069](https://github.com/ClickHouse/ClickHouse/issues/6069) [#6083](https://github.com/ClickHouse/ClickHouse/pull/6083) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed irrelevant exception in cast of `LowCardinality(Nullable)` to not-Nullable column in case if it doesn’t contain Nulls (e.g. in query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`. [#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Fixed irrelevant exception in cast of `LowCardinality(Nullable)` to a not-Nullable column in case it does not contain Nulls (e.g. in a query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`). [#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) - Fix non-deterministic result of “uniq” aggregate function in extremely rare cases. The bug was present in all ClickHouse versions. [#6058](https://github.com/ClickHouse/ClickHouse/pull/6058) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Segfault when we set a little bit too high CIDR on the function `IPv6CIDRToRange`. [#6068](https://github.com/ClickHouse/ClickHouse/pull/6068) ([Guillaume Tassery](https://github.com/YiuRULE)) - Fixed small memory leak when the server throws many exceptions from many different contexts.
[#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov)) @@ -1159,7 +1159,7 @@ toc_title: '2019' #### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-8} - Implemented `TestKeeper` as an implementation of ZooKeeper interface used for testing [#5643](https://github.com/ClickHouse/ClickHouse/pull/5643) ([alexey-milovidov](https://github.com/alexey-milovidov)) ([levushkin aleksej](https://github.com/alexey-milovidov)) -- From now on `.sql` tests can be run isolated by server, in parallel, with random database. It allows to run them faster, add new tests with custom server configurations, and be sure that different tests doesn’t affect each other. [#5554](https://github.com/ClickHouse/ClickHouse/pull/5554) ([Ivan](https://github.com/abyss7)) +- From now on `.sql` tests can be run isolated by server, in parallel, with random database. It allows to run them faster, add new tests with custom server configurations, and be sure that different tests do not affect each other. [#5554](https://github.com/ClickHouse/ClickHouse/pull/5554) ([Ivan](https://github.com/abyss7)) - Remove `` and `` from performance tests [#5672](https://github.com/ClickHouse/ClickHouse/pull/5672) ([Olga Khvostikova](https://github.com/stavrolia)) - Fixed “select_format” performance test for `Pretty` formats [#5642](https://github.com/ClickHouse/ClickHouse/pull/5642) ([alexey-milovidov](https://github.com/alexey-milovidov)) @@ -1201,7 +1201,7 @@ toc_title: '2019' - Fixed UInt32 overflow bug in linear models. Allow eval ML model for non-const model argument. [#5516](https://github.com/ClickHouse/ClickHouse/pull/5516) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) - `ALTER TABLE ... DROP INDEX IF EXISTS ...` should not raise an exception if provided index does not exist [#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn)) - Fix segfault with `bitmapHasAny` in scalar subquery [#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang)) -- Fixed error when replication connection pool doesn’t retry to resolve host, even when DNS cache was dropped. [#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin)) +- Fixed error when replication connection pool does not retry to resolve host, even when DNS cache was dropped. [#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin)) - Fixed `ALTER ... MODIFY TTL` on ReplicatedMergeTree. [#5539](https://github.com/ClickHouse/ClickHouse/pull/5539) ([Anton Popov](https://github.com/CurtizJ)) - Fix INSERT into Distributed table with MATERIALIZED column [#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat)) - Fix bad alloc when truncate Join storage [#5437](https://github.com/ClickHouse/ClickHouse/pull/5437) ([TCeason](https://github.com/TCeason)) @@ -1261,8 +1261,8 @@ toc_title: '2019' - Added `max_parts_in_total` setting for MergeTree family of tables (default: 100 000) that prevents unsafe specification of partition key #5166. [#5171](https://github.com/ClickHouse/ClickHouse/pull/5171) ([alexey-milovidov](https://github.com/alexey-milovidov)) - `clickhouse-obfuscator`: derive seed for individual columns by combining initial seed with column name, not column position.
This is intended to transform datasets with multiple related tables, so that tables will remain JOINable after transformation. [#5178](https://github.com/ClickHouse/ClickHouse/pull/5178) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Added functions `JSONExtractRaw`, `JSONExtractKeyAndValues`. Renamed functions `jsonExtract` to `JSONExtract`. When something goes wrong these functions return the correspondent values, not `NULL`. Modified function `JSONExtract`, now it gets the return type from its last parameter and doesn’t inject nullables. Implemented fallback to RapidJSON in case AVX2 instructions are not available. Simdjson library updated to a new version. [#5235](https://github.com/ClickHouse/ClickHouse/pull/5235) ([Vitaly Baranov](https://github.com/vitlibar)) -- Now `if` and `multiIf` functions don’t rely on the condition’s `Nullable`, but rely on the branches for sql compatibility. [#5238](https://github.com/ClickHouse/ClickHouse/pull/5238) ([Jian Wu](https://github.com/janplus)) +- Added functions `JSONExtractRaw`, `JSONExtractKeyAndValues`. Renamed functions `jsonExtract` to `JSONExtract`. When something goes wrong these functions return the corresponding values, not `NULL`. Modified function `JSONExtract`, now it gets the return type from its last parameter and does not inject nullables (see the sketch below). Implemented fallback to RapidJSON in case AVX2 instructions are not available. Simdjson library updated to a new version. [#5235](https://github.com/ClickHouse/ClickHouse/pull/5235) ([Vitaly Baranov](https://github.com/vitlibar)) +- Now `if` and `multiIf` functions do not rely on the condition’s `Nullable`, but rely on the branches for SQL compatibility. [#5238](https://github.com/ClickHouse/ClickHouse/pull/5238) ([Jian Wu](https://github.com/janplus)) - `In` predicate now generates `Null` result from `Null` input like the `Equal` function. [#5152](https://github.com/ClickHouse/ClickHouse/pull/5152) ([Jian Wu](https://github.com/janplus)) - Check the time limit every (flush_interval / poll_timeout) number of rows from Kafka. This allows to break the reading from Kafka consumer more frequently and to check the time limits for the top-level streams [#5249](https://github.com/ClickHouse/ClickHouse/pull/5249) ([Ivan](https://github.com/abyss7)) - Link rdkafka with bundled SASL. It should allow to use SASL SCRAM authentication [#5253](https://github.com/ClickHouse/ClickHouse/pull/5253) ([Ivan](https://github.com/abyss7)) @@ -1347,13 +1347,13 @@ toc_title: '2019' - Fixed bitmap functions producing wrong results. [#5359](https://github.com/ClickHouse/ClickHouse/pull/5359) ([Andy Yang](https://github.com/andyyzh)) - Fix element_count for hashed dictionary (do not include duplicates) [#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat)) - Use contents of environment variable TZ as the name for timezone. It helps to correctly detect default timezone in some cases. [#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7)) -- Do not try to convert integers in `dictGetT` functions, because it doesn’t work correctly. Throw an exception instead. [#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2)) +- Do not try to convert integers in `dictGetT` functions, because it does not work correctly. Throw an exception instead. [#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2)) - Fix settings in ExternalData HTTP request.
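To illustrate the `JSONExtract` behavior referenced above, where the return type is taken from the last argument (a sketch; results shown as comments):

```sql
SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'a', 'String');
-- 'hello'

SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(Float64)');
-- [-100, 200, 300]

-- JSONExtractRaw returns the unparsed JSON fragment:
SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b');
-- '[-100, 200.0, 300]'
```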
[#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1)) - Fix bug when parts were removed only from FS without dropping them from Zookeeper. [#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin)) - Fix segmentation fault in `bitmapHasAny` function. [#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang)) -- Fixed error when replication connection pool doesn’t retry to resolve host, even when DNS cache was dropped. [#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin)) -- Fixed `DROP INDEX IF EXISTS` query. Now `ALTER TABLE ... DROP INDEX IF EXISTS ...` query doesn’t raise an exception if provided index does not exist. [#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn)) +- Fixed error when replication connection pool does not retry to resolve host, even when DNS cache was dropped. [#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin)) +- Fixed `DROP INDEX IF EXISTS` query. Now `ALTER TABLE ... DROP INDEX IF EXISTS ...` query does not raise an exception if provided index does not exist. [#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn)) - Fix union all supertype column. There were cases with inconsistent data and column types of resulting columns. [#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2)) - Skip ZNONODE during DDL query processing. Previously, if another node removed the znode from the task queue, a node that had not processed it yet but had already fetched the list of children would terminate the DDLWorker thread. [#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat)) @@ -1712,7 +1712,7 @@ toc_title: '2019' - Improved heuristics of “move to PREWHERE” optimization. [#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Use proper lookup tables that use HashTable’s API for 8-bit and 16-bit keys. [#4536](https://github.com/ClickHouse/ClickHouse/pull/4536) ([Amos Bird](https://github.com/amosbird)) - Improved performance of string comparison. [#4564](https://github.com/ClickHouse/ClickHouse/pull/4564) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Cleanup distributed DDL queue in a separate thread so that it doesn’t slow down the main loop that processes distributed DDL tasks. [#4502](https://github.com/ClickHouse/ClickHouse/pull/4502) ([Alex Zatelepin](https://github.com/ztlpn)) +- Cleanup distributed DDL queue in a separate thread so that it does not slow down the main loop that processes distributed DDL tasks. [#4502](https://github.com/ClickHouse/ClickHouse/pull/4502) ([Alex Zatelepin](https://github.com/ztlpn)) - When `min_bytes_to_use_direct_io` is set to 1, not every file was opened with O_DIRECT mode because the data size to read was sometimes underestimated by the size of one compressed block. [#4526](https://github.com/ClickHouse/ClickHouse/pull/4526) ([alexey-milovidov](https://github.com/alexey-milovidov)) #### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-12} @@ -1839,7 +1839,7 @@ toc_title: '2019' - Fixed incorrect result when `Date` and `DateTime` arguments are used in branches of conditional operator (function `if`).
Added generic case for function `if`. [#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov)) - ClickHouse dictionaries now load within `clickhouse` process. [#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Fixed deadlock when `SELECT` from a table with `File` engine was retried after `No such file or directory` error. [#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed race condition when selecting from `system.tables` may give `table doesn't exist` error. [#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed race condition when selecting from `system.tables` may give `table does not exist` error. [#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov)) - `clickhouse-client` can segfault on exit while loading data for command line suggestions if it was run in interactive mode. [#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Fixed a bug when the execution of mutations containing `IN` operators was producing incorrect results. [#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn)) - Fixed error: if there is a database with `Dictionary` engine, all dictionaries are forced to load at server startup, and if there is a dictionary with ClickHouse source from localhost, the dictionary cannot load. [#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov)) @@ -1941,7 +1941,7 @@ This release contains exactly the same set of patches as 19.3.6. - Fixed error: if there is a database with `Dictionary` engine, all dictionaries are forced to load at server startup, and if there is a dictionary with ClickHouse source from localhost, the dictionary cannot load. [#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Fixed a bug when the execution of mutations containing `IN` operators was producing incorrect results. [#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn)) - `clickhouse-client` can segfault on exit while loading data for command line suggestions if it was run in interactive mode. [#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed race condition when selecting from `system.tables` may give `table doesn't exist` error. [#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed race condition when selecting from `system.tables` may give `table does not exist` error. [#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Fixed deadlock when `SELECT` from a table with `File` engine was retried after `No such file or directory` error. [#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Fixed an issue: local ClickHouse dictionaries are loaded via TCP, but should load within process.
[#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Fixed `No message received` error when interacting with PostgreSQL ODBC Driver through TLS connection. Also fixes segfault when using MySQL ODBC Driver. [#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov)) @@ -2030,8 +2030,8 @@ This release contains exactly the same set of patches as 19.3.6. #### Performance Improvements {#performance-improvements-5} -- Add a MergeTree setting `use_minimalistic_part_header_in_zookeeper`. If enabled, Replicated tables will store compact part metadata in a single part znode. This can dramatically reduce ZooKeeper snapshot size (especially if the tables have a lot of columns). Note that after enabling this setting you will not be able to downgrade to a version that doesn’t support it. [#3960](https://github.com/ClickHouse/ClickHouse/pull/3960) ([Alex Zatelepin](https://github.com/ztlpn)) -- Add an DFA-based implementation for functions `sequenceMatch` and `sequenceCount` in case pattern doesn’t contain time. [#4004](https://github.com/ClickHouse/ClickHouse/pull/4004) ([Léo Ercolanelli](https://github.com/ercolanelli-leo)) +- Add a MergeTree setting `use_minimalistic_part_header_in_zookeeper`. If enabled, Replicated tables will store compact part metadata in a single part znode. This can dramatically reduce ZooKeeper snapshot size (especially if the tables have a lot of columns). Note that after enabling this setting you will not be able to downgrade to a version that does not support it. [#3960](https://github.com/ClickHouse/ClickHouse/pull/3960) ([Alex Zatelepin](https://github.com/ztlpn)) +- Add a DFA-based implementation for functions `sequenceMatch` and `sequenceCount` in case the pattern does not contain time (see the sketch below). [#4004](https://github.com/ClickHouse/ClickHouse/pull/4004) ([Léo Ercolanelli](https://github.com/ercolanelli-leo)) - Performance improvement for integer numbers serialization. [#3968](https://github.com/ClickHouse/ClickHouse/pull/3968) ([Amos Bird](https://github.com/amosbird)) - Zero left padding PODArray so that -1 element is always valid and zeroed. It’s used for branchless calculation of offsets. [#3920](https://github.com/ClickHouse/ClickHouse/pull/3920) ([Amos Bird](https://github.com/amosbird)) - Reverted `jemalloc` version which led to performance degradation. [#4018](https://github.com/ClickHouse/ClickHouse/pull/4018) ([alexey-milovidov](https://github.com/alexey-milovidov)) @@ -2055,7 +2055,7 @@ This release contains exactly the same set of patches as 19.3.6. - Fixed bugs found by PVS-Studio. [#4013](https://github.com/ClickHouse/ClickHouse/pull/4013) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Fixed glibc compatibility issues. [#4100](https://github.com/ClickHouse/ClickHouse/pull/4100) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Move Docker images to 18.10 and add compatibility file for glibc \>= 2.28 [#3965](https://github.com/ClickHouse/ClickHouse/pull/3965) ([alesapin](https://github.com/alesapin)) -- Add env variable if user don’t want to chown directories in server Docker image. [#3967](https://github.com/ClickHouse/ClickHouse/pull/3967) ([alesapin](https://github.com/alesapin)) +- Add env variable if the user does not want to chown directories in server Docker image.
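As a rough sketch of the `sequenceMatch` entry above: the DFA-based implementation applies when the pattern contains only event conditions and no time constraints (`events` and its columns are hypothetical names):

```sql
-- No time condition in the pattern: eligible for the DFA code path.
SELECT
    user_id,
    sequenceMatch('(?1)(?2)')(event_time, event = 'login', event = 'purchase') AS funnel
FROM events
GROUP BY user_id;

-- A pattern such as '(?1)(?t<=3600)(?2)' contains a time condition
-- and therefore takes the general (non-DFA) implementation.
```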
[#3967](https://github.com/ClickHouse/ClickHouse/pull/3967) ([alesapin](https://github.com/alesapin)) - Enabled most of the warnings from `-Weverything` in clang. Enabled `-Wpedantic`. [#3986](https://github.com/ClickHouse/ClickHouse/pull/3986) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Added a few more warnings that are available only in clang 8. [#3993](https://github.com/ClickHouse/ClickHouse/pull/3993) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Link to `libLLVM` rather than to individual LLVM libs when using shared linking. [#3989](https://github.com/ClickHouse/ClickHouse/pull/3989) ([Orivej Desh](https://github.com/orivej)) diff --git a/docs/en/whats-new/changelog/2020.md b/docs/en/whats-new/changelog/2020.md index bf4e4fb0fcc..7fb1f5d9377 100644 --- a/docs/en/whats-new/changelog/2020.md +++ b/docs/en/whats-new/changelog/2020.md @@ -14,7 +14,7 @@ toc_title: '2020' * Restrict merges from wide to compact parts. In case of vertical merge it led to broken result part. [#18381](https://github.com/ClickHouse/ClickHouse/pull/18381) ([Anton Popov](https://github.com/CurtizJ)). * Fix filling table `system.settings_profile_elements`. This PR fixes [#18231](https://github.com/ClickHouse/ClickHouse/issues/18231). [#18379](https://github.com/ClickHouse/ClickHouse/pull/18379) ([Vitaly Baranov](https://github.com/vitlibar)). * Fix possible crashes in aggregate functions with combinator `Distinct`, while using two-level aggregation. Fixes [#17682](https://github.com/ClickHouse/ClickHouse/issues/17682). [#18365](https://github.com/ClickHouse/ClickHouse/pull/18365) ([Anton Popov](https://github.com/CurtizJ)). -* Fix error when query `MODIFY COLUMN ... REMOVE TTL` doesn't actually remove column TTL. [#18130](https://github.com/ClickHouse/ClickHouse/pull/18130) ([alesapin](https://github.com/alesapin)). +* Fix error when query `MODIFY COLUMN ... REMOVE TTL` does not actually remove column TTL. [#18130](https://github.com/ClickHouse/ClickHouse/pull/18130) ([alesapin](https://github.com/alesapin)). #### Build/Testing/Packaging Improvement @@ -88,7 +88,7 @@ toc_title: '2020' * Fix `optimize_distributed_group_by_sharding_key` setting (that is disabled by default) for query with OFFSET only. [#16996](https://github.com/ClickHouse/ClickHouse/pull/16996) ([Azat Khuzhin](https://github.com/azat)). * Fix for Merge tables over Distributed tables with JOIN. [#16993](https://github.com/ClickHouse/ClickHouse/pull/16993) ([Azat Khuzhin](https://github.com/azat)). * Fixed wrong result in big integers (128, 256 bit) when casting from double. Big integers support is experimental. [#16986](https://github.com/ClickHouse/ClickHouse/pull/16986) ([Mike](https://github.com/myrrc)). -* Fix possible server crash after `ALTER TABLE ... MODIFY COLUMN ... NewType` when `SELECT` have `WHERE` expression on altering column and alter doesn't finished yet. [#16968](https://github.com/ClickHouse/ClickHouse/pull/16968) ([Amos Bird](https://github.com/amosbird)). +* Fix possible server crash after `ALTER TABLE ... MODIFY COLUMN ... NewType` when a `SELECT` has a `WHERE` expression on the column being altered and the alter has not finished yet (see the sketch below). [#16968](https://github.com/ClickHouse/ClickHouse/pull/16968) ([Amos Bird](https://github.com/amosbird)). * Blame info was not calculated correctly in `clickhouse-git-import`. [#16959](https://github.com/ClickHouse/ClickHouse/pull/16959) ([alexey-milovidov](https://github.com/alexey-milovidov)). * Fix order by optimization with monotonic functions.
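The crash scenario from [#16968] above can be sketched as two concurrent sessions (table and column names are hypothetical):

```sql
-- Session 1: a type change that rewrites data parts in the background.
ALTER TABLE metrics MODIFY COLUMN value UInt64;  -- the column was UInt32

-- Session 2: a concurrent read filtering on the column being altered.
-- Before the fix, this could crash the server while the alter was still in flight.
SELECT count() FROM metrics WHERE value > 100;
```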
Fixes [#16107](https://github.com/ClickHouse/ClickHouse/issues/16107). [#16956](https://github.com/ClickHouse/ClickHouse/pull/16956) ([Anton Popov](https://github.com/CurtizJ)). * Fix optimization of group by with enabled setting `optimize_aggregators_of_group_by_keys` and joins. Fixes [#12604](https://github.com/ClickHouse/ClickHouse/issues/12604). [#16951](https://github.com/ClickHouse/ClickHouse/pull/16951) ([Anton Popov](https://github.com/CurtizJ)). @@ -212,7 +212,7 @@ toc_title: '2020' * `SELECT count() FROM table` now can be executed if only one any column can be selected from the `table`. This PR fixes [#10639](https://github.com/ClickHouse/ClickHouse/issues/10639). [#18233](https://github.com/ClickHouse/ClickHouse/pull/18233) ([Vitaly Baranov](https://github.com/vitlibar)). * `SELECT JOIN` now requires the `SELECT` privilege on each of the joined tables. This PR fixes [#17654](https://github.com/ClickHouse/ClickHouse/issues/17654). [#18232](https://github.com/ClickHouse/ClickHouse/pull/18232) ([Vitaly Baranov](https://github.com/vitlibar)). * Fix possible incomplete query result while reading from `MergeTree*` in case of read backoff (message ` MergeTreeReadPool: Will lower number of threads` in logs). Was introduced in [#16423](https://github.com/ClickHouse/ClickHouse/issues/16423). Fixes [#18137](https://github.com/ClickHouse/ClickHouse/issues/18137). [#18216](https://github.com/ClickHouse/ClickHouse/pull/18216) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). -* Fix error when query `MODIFY COLUMN ... REMOVE TTL` doesn't actually remove column TTL. [#18130](https://github.com/ClickHouse/ClickHouse/pull/18130) ([alesapin](https://github.com/alesapin)). +* Fix error when query `MODIFY COLUMN ... REMOVE TTL` does not actually remove column TTL. [#18130](https://github.com/ClickHouse/ClickHouse/pull/18130) ([alesapin](https://github.com/alesapin)). * Fix non-deterministic functions with predicate optimizer. This fixes [#17244](https://github.com/ClickHouse/ClickHouse/issues/17244). [#17273](https://github.com/ClickHouse/ClickHouse/pull/17273) ([Winter Zhang](https://github.com/zhang2014)). * Mutation might hang waiting for some non-existent part after `MOVE` or `REPLACE PARTITION` or, in rare cases, after `DETACH` or `DROP PARTITION`. It's fixed. [#15537](https://github.com/ClickHouse/ClickHouse/pull/15537) ([tavplubix](https://github.com/tavplubix)). @@ -253,7 +253,7 @@ toc_title: '2020' * Avoid unnecessary network errors for remote queries which may be cancelled while execution, like queries with `LIMIT`. [#17006](https://github.com/ClickHouse/ClickHouse/pull/17006) ([Azat Khuzhin](https://github.com/azat)). * Fixed wrong result in big integers (128, 256 bit) when casting from double. [#16986](https://github.com/ClickHouse/ClickHouse/pull/16986) ([Mike](https://github.com/myrrc)). * Reresolve the IP of the `format_avro_schema_registry_url` in case of errors. [#16985](https://github.com/ClickHouse/ClickHouse/pull/16985) ([filimonov](https://github.com/filimonov)). -* Fixed possible server crash after `ALTER TABLE ... MODIFY COLUMN ... NewType` when `SELECT` have `WHERE` expression on altering column and alter doesn't finished yet. [#16968](https://github.com/ClickHouse/ClickHouse/pull/16968) ([Amos Bird](https://github.com/amosbird)). +* Fixed possible server crash after `ALTER TABLE ... MODIFY COLUMN ... NewType` when a `SELECT` has a `WHERE` expression on the column being altered and the alter has not finished yet.
[#16968](https://github.com/ClickHouse/ClickHouse/pull/16968) ([Amos Bird](https://github.com/amosbird)). * Blame info was not calculated correctly in `clickhouse-git-import`. [#16959](https://github.com/ClickHouse/ClickHouse/pull/16959) ([alexey-milovidov](https://github.com/alexey-milovidov)). * Fixed order by optimization with monotonic functions. Fixes [#16107](https://github.com/ClickHouse/ClickHouse/issues/16107). [#16956](https://github.com/ClickHouse/ClickHouse/pull/16956) ([Anton Popov](https://github.com/CurtizJ)). * Fixed optimization of group by with enabled setting `optimize_aggregators_of_group_by_keys` and joins. This fixes [#12604](https://github.com/ClickHouse/ClickHouse/issues/12604). [#16951](https://github.com/ClickHouse/ClickHouse/pull/16951) ([Anton Popov](https://github.com/CurtizJ)). @@ -424,7 +424,7 @@ toc_title: '2020' * Avoid unnecessary network errors for remote queries which may be cancelled while execution, like queries with `LIMIT`. [#17006](https://github.com/ClickHouse/ClickHouse/pull/17006) ([Azat Khuzhin](https://github.com/azat)). * Fixed wrong result in big integers (128, 256 bit) when casting from double. [#16986](https://github.com/ClickHouse/ClickHouse/pull/16986) ([Mike](https://github.com/myrrc)). * Reresolve the IP of the `format_avro_schema_registry_url` in case of errors. [#16985](https://github.com/ClickHouse/ClickHouse/pull/16985) ([filimonov](https://github.com/filimonov)). -* Fixed possible server crash after `ALTER TABLE ... MODIFY COLUMN ... NewType` when `SELECT` have `WHERE` expression on altering column and alter doesn't finished yet. [#16968](https://github.com/ClickHouse/ClickHouse/pull/16968) ([Amos Bird](https://github.com/amosbird)). +* Fixed possible server crash after `ALTER TABLE ... MODIFY COLUMN ... NewType` when a `SELECT` has a `WHERE` expression on the column being altered and the alter has not finished yet. [#16968](https://github.com/ClickHouse/ClickHouse/pull/16968) ([Amos Bird](https://github.com/amosbird)). * Blame info was not calculated correctly in `clickhouse-git-import`. [#16959](https://github.com/ClickHouse/ClickHouse/pull/16959) ([alexey-milovidov](https://github.com/alexey-milovidov)). * Fixed order by optimization with monotonic functions. This fixes [#16107](https://github.com/ClickHouse/ClickHouse/issues/16107). [#16956](https://github.com/ClickHouse/ClickHouse/pull/16956) ([Anton Popov](https://github.com/CurtizJ)). * Fixed optimization of group by with enabled setting `optimize_aggregators_of_group_by_keys` and joins. This fixes [#12604](https://github.com/ClickHouse/ClickHouse/issues/12604). [#16951](https://github.com/ClickHouse/ClickHouse/pull/16951) ([Anton Popov](https://github.com/CurtizJ)). @@ -514,7 +514,7 @@ toc_title: '2020' * Fix rare segfaults when inserting into or selecting from MaterializedView and concurrently dropping target table (for Atomic database engine). [#15984](https://github.com/ClickHouse/ClickHouse/pull/15984) ([tavplubix](https://github.com/tavplubix)). * Fix ambiguity in parsing of settings profiles: `CREATE USER ... SETTINGS profile readonly` is now considered as using a profile named `readonly`, not a setting named `profile` with the readonly constraint. This fixes [#15628](https://github.com/ClickHouse/ClickHouse/issues/15628). [#15982](https://github.com/ClickHouse/ClickHouse/pull/15982) ([Vitaly Baranov](https://github.com/vitlibar)). * `MaterializeMySQL` (experimental feature): Fix crash on create database failure.
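For reference, the `MaterializeMySQL` entry above concerns databases created as follows (experimental engine; the connection details are hypothetical). The fix turns a failure at this step, such as an unreachable host, into an error instead of a crash:

```sql
-- Experimental in this era: requires the allow_experimental_database_materialize_mysql setting.
CREATE DATABASE mysql_mirror
ENGINE = MaterializeMySQL('mysql-host:3306', 'source_db', 'user', 'password');
```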
[#15954](https://github.com/ClickHouse/ClickHouse/pull/15954) ([Winter Zhang](https://github.com/zhang2014)). -* Fixed `DROP TABLE IF EXISTS` failure with `Table ... doesn't exist` error when table is concurrently renamed (for Atomic database engine). Fixed rare deadlock when concurrently executing some DDL queries with multiple tables (like `DROP DATABASE` and `RENAME TABLE`) - Fixed `DROP/DETACH DATABASE` failure with `Table ... doesn't exist` when concurrently executing `DROP/DETACH TABLE`. [#15934](https://github.com/ClickHouse/ClickHouse/pull/15934) ([tavplubix](https://github.com/tavplubix)). +* Fixed `DROP TABLE IF EXISTS` failure with `Table ... does not exist` error when table is concurrently renamed (for Atomic database engine). Fixed rare deadlock when concurrently executing some DDL queries with multiple tables (like `DROP DATABASE` and `RENAME TABLE`) - Fixed `DROP/DETACH DATABASE` failure with `Table ... does not exist` when concurrently executing `DROP/DETACH TABLE`. [#15934](https://github.com/ClickHouse/ClickHouse/pull/15934) ([tavplubix](https://github.com/tavplubix)). * Fix incorrect empty result for query from `Distributed` table if query has `WHERE`, `PREWHERE` and `GLOBAL IN`. Fixes [#15792](https://github.com/ClickHouse/ClickHouse/issues/15792). [#15933](https://github.com/ClickHouse/ClickHouse/pull/15933) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Fixes [#12513](https://github.com/ClickHouse/ClickHouse/issues/12513): difference expressions with same alias when query is reanalyzed. [#15886](https://github.com/ClickHouse/ClickHouse/pull/15886) ([Winter Zhang](https://github.com/zhang2014)). * Fix possible very rare deadlocks in RBAC implementation. [#15875](https://github.com/ClickHouse/ClickHouse/pull/15875) ([Vitaly Baranov](https://github.com/vitlibar)). @@ -535,7 +535,7 @@ toc_title: '2020' * Fixed `Element ... is not a constant expression` error when using `JSON*` function result in `VALUES`, `LIMIT` or right side of `IN` operator. [#15589](https://github.com/ClickHouse/ClickHouse/pull/15589) ([tavplubix](https://github.com/tavplubix)). * Query will finish faster in case of exception. Cancel execution on remote replicas if exception happens. [#15578](https://github.com/ClickHouse/ClickHouse/pull/15578) ([Azat Khuzhin](https://github.com/azat)). * Prevent the possibility of error message `Could not calculate available disk space (statvfs), errno: 4, strerror: Interrupted system call`. This fixes [#15541](https://github.com/ClickHouse/ClickHouse/issues/15541). [#15557](https://github.com/ClickHouse/ClickHouse/pull/15557) ([alexey-milovidov](https://github.com/alexey-milovidov)). -* Fix `Database doesn't exist.` in queries with IN and Distributed table when there's no database on initiator. [#15538](https://github.com/ClickHouse/ClickHouse/pull/15538) ([Artem Zuikov](https://github.com/4ertus2)). +* Fix `Database does not exist.` in queries with IN and Distributed table when there's no database on initiator. [#15538](https://github.com/ClickHouse/ClickHouse/pull/15538) ([Artem Zuikov](https://github.com/4ertus2)). * Mutation might hang waiting for some non-existent part after `MOVE` or `REPLACE PARTITION` or, in rare cases, after `DETACH` or `DROP PARTITION`. It's fixed. [#15537](https://github.com/ClickHouse/ClickHouse/pull/15537) ([tavplubix](https://github.com/tavplubix)). * Fix bug when `ILIKE` operator stops being case insensitive if `LIKE` with the same pattern was executed. 
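A sketch of the `ILIKE` regression described above (the shared-cache mechanism is an assumption for illustration):

```sql
SELECT 'Hello' LIKE '%hello%';   -- 0: LIKE is case sensitive
SELECT 'Hello' ILIKE '%hello%';  -- 1 expected: ILIKE must stay case insensitive
-- Before the fix, executing the LIKE query first could make a subsequent
-- ILIKE query with the same pattern behave case sensitively as well.
```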
[#15536](https://github.com/ClickHouse/ClickHouse/pull/15536) ([alesapin](https://github.com/alesapin)). * Fix `Missing columns` errors when selecting columns which are absent in data, but depend on other columns which are also absent in data. Fixes [#15530](https://github.com/ClickHouse/ClickHouse/issues/15530). [#15532](https://github.com/ClickHouse/ClickHouse/pull/15532) ([alesapin](https://github.com/alesapin)). @@ -552,7 +552,7 @@ toc_title: '2020' * Fix MSan report in QueryLog. Uninitialized memory can be used for the field `memory_usage`. [#15258](https://github.com/ClickHouse/ClickHouse/pull/15258) ([alexey-milovidov](https://github.com/alexey-milovidov)). * Fix 'Unknown identifier' in GROUP BY when query has JOIN over Merge table. [#15242](https://github.com/ClickHouse/ClickHouse/pull/15242) ([Artem Zuikov](https://github.com/4ertus2)). * Fix instance crash when using `joinGet` with `LowCardinality` types. This fixes [#15214](https://github.com/ClickHouse/ClickHouse/issues/15214). [#15220](https://github.com/ClickHouse/ClickHouse/pull/15220) ([Amos Bird](https://github.com/amosbird)). -* Fix bug in table engine `Buffer` which doesn't allow to insert data of new structure into `Buffer` after `ALTER` query. Fixes [#15117](https://github.com/ClickHouse/ClickHouse/issues/15117). [#15192](https://github.com/ClickHouse/ClickHouse/pull/15192) ([alesapin](https://github.com/alesapin)). +* Fix bug in table engine `Buffer` which does not allow to insert data of new structure into `Buffer` after `ALTER` query. Fixes [#15117](https://github.com/ClickHouse/ClickHouse/issues/15117). [#15192](https://github.com/ClickHouse/ClickHouse/pull/15192) ([alesapin](https://github.com/alesapin)). * Adjust Decimal field size in MySQL column definition packet. [#15152](https://github.com/ClickHouse/ClickHouse/pull/15152) ([maqroll](https://github.com/maqroll)). * Fixes `Data compressed with different methods` in `join_algorithm='auto'`. Keep LowCardinality as type for left table join key in `join_algorithm='partial_merge'`. [#15088](https://github.com/ClickHouse/ClickHouse/pull/15088) ([Artem Zuikov](https://github.com/4ertus2)). * Update `jemalloc` to fix `percpu_arena` with affinity mask. [#15035](https://github.com/ClickHouse/ClickHouse/pull/15035) ([Azat Khuzhin](https://github.com/azat)). [#14957](https://github.com/ClickHouse/ClickHouse/pull/14957) ([Azat Khuzhin](https://github.com/azat)). @@ -711,7 +711,7 @@ toc_title: '2020' * Fix bug when `ON CLUSTER` queries may hang forever for non-leader ReplicatedMergeTreeTables. [#17089](https://github.com/ClickHouse/ClickHouse/pull/17089) ([alesapin](https://github.com/alesapin)). * Reresolve the IP of the `format_avro_schema_registry_url` in case of errors. [#16985](https://github.com/ClickHouse/ClickHouse/pull/16985) ([filimonov](https://github.com/filimonov)). -* Fix possible server crash after `ALTER TABLE ... MODIFY COLUMN ... NewType` when `SELECT` have `WHERE` expression on altering column and alter doesn't finished yet. [#16968](https://github.com/ClickHouse/ClickHouse/pull/16968) ([Amos Bird](https://github.com/amosbird)). +* Fix possible server crash after `ALTER TABLE ... MODIFY COLUMN ... NewType` when a `SELECT` has a `WHERE` expression on the column being altered and the alter has not finished yet. [#16968](https://github.com/ClickHouse/ClickHouse/pull/16968) ([Amos Bird](https://github.com/amosbird)). * Install script should always create subdirs in config folders. This is only relevant for Docker build with custom config.
[#16936](https://github.com/ClickHouse/ClickHouse/pull/16936) ([filimonov](https://github.com/filimonov)). * Fix possible error `Illegal type of argument` for queries with `ORDER BY`. Fixes [#16580](https://github.com/ClickHouse/ClickHouse/issues/16580). [#16928](https://github.com/ClickHouse/ClickHouse/pull/16928) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Abort multipart upload if no data was written to WriteBufferFromS3. [#16840](https://github.com/ClickHouse/ClickHouse/pull/16840) ([Pavel Kovalenko](https://github.com/Jokser)). @@ -751,7 +751,7 @@ toc_title: '2020' * Fix rare segfaults when inserting into or selecting from MaterializedView and concurrently dropping target table (for Atomic database engine). [#15984](https://github.com/ClickHouse/ClickHouse/pull/15984) ([tavplubix](https://github.com/tavplubix)). * Fix ambiguity in parsing of settings profiles: `CREATE USER ... SETTINGS profile readonly` is now considered as using a profile named `readonly`, not a setting named `profile` with the readonly constraint. This fixes [#15628](https://github.com/ClickHouse/ClickHouse/issues/15628). [#15982](https://github.com/ClickHouse/ClickHouse/pull/15982) ([Vitaly Baranov](https://github.com/vitlibar)). * Fix a crash when database creation fails. [#15954](https://github.com/ClickHouse/ClickHouse/pull/15954) ([Winter Zhang](https://github.com/zhang2014)). -* Fixed `DROP TABLE IF EXISTS` failure with `Table ... doesn't exist` error when table is concurrently renamed (for Atomic database engine). Fixed rare deadlock when concurrently executing some DDL queries with multiple tables (like `DROP DATABASE` and `RENAME TABLE`) Fixed `DROP/DETACH DATABASE` failure with `Table ... doesn't exist` when concurrently executing `DROP/DETACH TABLE`. [#15934](https://github.com/ClickHouse/ClickHouse/pull/15934) ([tavplubix](https://github.com/tavplubix)). +* Fixed `DROP TABLE IF EXISTS` failure with `Table ... does not exist` error when table is concurrently renamed (for Atomic database engine). Fixed rare deadlock when concurrently executing some DDL queries with multiple tables (like `DROP DATABASE` and `RENAME TABLE`). Fixed `DROP/DETACH DATABASE` failure with `Table ... does not exist` when concurrently executing `DROP/DETACH TABLE`. [#15934](https://github.com/ClickHouse/ClickHouse/pull/15934) ([tavplubix](https://github.com/tavplubix)). * Fix incorrect empty result for query from `Distributed` table if query has `WHERE`, `PREWHERE` and `GLOBAL IN`. Fixes [#15792](https://github.com/ClickHouse/ClickHouse/issues/15792). [#15933](https://github.com/ClickHouse/ClickHouse/pull/15933) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Fix possible deadlocks in RBAC. [#15875](https://github.com/ClickHouse/ClickHouse/pull/15875) ([Vitaly Baranov](https://github.com/vitlibar)). * Fix exception `Block structure mismatch` in `SELECT ... ORDER BY DESC` queries which were executed after `ALTER MODIFY COLUMN` query. Fixes [#15800](https://github.com/ClickHouse/ClickHouse/issues/15800). [#15852](https://github.com/ClickHouse/ClickHouse/pull/15852) ([alesapin](https://github.com/alesapin)). @@ -789,7 +789,7 @@ toc_title: '2020' * Fix rare race condition on server startup when system.logs are enabled. [#15300](https://github.com/ClickHouse/ClickHouse/pull/15300) ([alesapin](https://github.com/alesapin)). * Fix MSan report in QueryLog. Uninitialized memory can be used for the field `memory_usage`.
[#15258](https://github.com/ClickHouse/ClickHouse/pull/15258) ([alexey-milovidov](https://github.com/alexey-milovidov)). * Fix instance crash when using joinGet with LowCardinality types. This fixes [#15214](https://github.com/ClickHouse/ClickHouse/issues/15214). [#15220](https://github.com/ClickHouse/ClickHouse/pull/15220) ([Amos Bird](https://github.com/amosbird)). -* Fix bug in table engine `Buffer` which doesn't allow to insert data of new structure into `Buffer` after `ALTER` query. Fixes [#15117](https://github.com/ClickHouse/ClickHouse/issues/15117). [#15192](https://github.com/ClickHouse/ClickHouse/pull/15192) ([alesapin](https://github.com/alesapin)). +* Fix bug in table engine `Buffer` which does not allow inserting data of a new structure into `Buffer` after an `ALTER` query. Fixes [#15117](https://github.com/ClickHouse/ClickHouse/issues/15117). [#15192](https://github.com/ClickHouse/ClickHouse/pull/15192) ([alesapin](https://github.com/alesapin)). * Adjust decimals field size in mysql column definition packet. [#15152](https://github.com/ClickHouse/ClickHouse/pull/15152) ([maqroll](https://github.com/maqroll)). * Fixed `Cannot rename ... errno: 22, strerror: Invalid argument` error on DDL query execution in Atomic database when running clickhouse-server in docker on Mac OS. [#15024](https://github.com/ClickHouse/ClickHouse/pull/15024) ([tavplubix](https://github.com/tavplubix)). * Fix to make predicate push down work when subquery contains finalizeAggregation function. Fixes [#14847](https://github.com/ClickHouse/ClickHouse/issues/14847). [#14937](https://github.com/ClickHouse/ClickHouse/pull/14937) ([filimonov](https://github.com/filimonov)). @@ -909,7 +909,7 @@ toc_title: '2020' * Fixed bug when `ON CLUSTER` queries may hang forever for non-leader ReplicatedMergeTreeTables. [#17089](https://github.com/ClickHouse/ClickHouse/pull/17089) ([alesapin](https://github.com/alesapin)). * Avoid unnecessary network errors for remote queries which may be cancelled during execution, like queries with `LIMIT`. [#17006](https://github.com/ClickHouse/ClickHouse/pull/17006) ([Azat Khuzhin](https://github.com/azat)). * Reresolve the IP of the `format_avro_schema_registry_url` in case of errors. [#16985](https://github.com/ClickHouse/ClickHouse/pull/16985) ([filimonov](https://github.com/filimonov)). -* Fixed possible server crash after `ALTER TABLE ... MODIFY COLUMN ... NewType` when `SELECT` have `WHERE` expression on altering column and alter doesn't finished yet. [#16968](https://github.com/ClickHouse/ClickHouse/pull/16968) ([Amos Bird](https://github.com/amosbird)). +* Fixed possible server crash after `ALTER TABLE ... MODIFY COLUMN ... NewType` when a `SELECT` has a `WHERE` expression on the column being altered and the alter has not finished yet. [#16968](https://github.com/ClickHouse/ClickHouse/pull/16968) ([Amos Bird](https://github.com/amosbird)). * Install script should always create subdirs in config folders. This is only relevant for Docker build with custom config. [#16936](https://github.com/ClickHouse/ClickHouse/pull/16936) ([filimonov](https://github.com/filimonov)). * Fixed possible error `Illegal type of argument` for queries with `ORDER BY`. Fixes [#16580](https://github.com/ClickHouse/ClickHouse/issues/16580). [#16928](https://github.com/ClickHouse/ClickHouse/pull/16928) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Abort multipart upload if no data was written to WriteBufferFromS3.
[#16840](https://github.com/ClickHouse/ClickHouse/pull/16840) ([Pavel Kovalenko](https://github.com/Jokser)). @@ -949,7 +949,7 @@ toc_title: '2020' * Fix rare segfaults when inserting into or selecting from MaterializedView and concurrently dropping target table (for Atomic database engine). [#15984](https://github.com/ClickHouse/ClickHouse/pull/15984) ([tavplubix](https://github.com/tavplubix)). * Fix ambiguity in parsing of settings profiles: `CREATE USER ... SETTINGS profile readonly` is now considered as using a profile named `readonly`, not a setting named `profile` with the readonly constraint. This fixes [#15628](https://github.com/ClickHouse/ClickHouse/issues/15628). [#15982](https://github.com/ClickHouse/ClickHouse/pull/15982) ([Vitaly Baranov](https://github.com/vitlibar)). * Fix a crash when database creation fails. [#15954](https://github.com/ClickHouse/ClickHouse/pull/15954) ([Winter Zhang](https://github.com/zhang2014)). -* Fixed `DROP TABLE IF EXISTS` failure with `Table ... doesn't exist` error when table is concurrently renamed (for Atomic database engine). Fixed rare deadlock when concurrently executing some DDL queries with multiple tables (like `DROP DATABASE` and `RENAME TABLE`) Fixed `DROP/DETACH DATABASE` failure with `Table ... doesn't exist` when concurrently executing `DROP/DETACH TABLE`. [#15934](https://github.com/ClickHouse/ClickHouse/pull/15934) ([tavplubix](https://github.com/tavplubix)). +* Fixed `DROP TABLE IF EXISTS` failure with `Table ... does not exist` error when table is concurrently renamed (for Atomic database engine). Fixed rare deadlock when concurrently executing some DDL queries with multiple tables (like `DROP DATABASE` and `RENAME TABLE`). Fixed `DROP/DETACH DATABASE` failure with `Table ... does not exist` when concurrently executing `DROP/DETACH TABLE`. [#15934](https://github.com/ClickHouse/ClickHouse/pull/15934) ([tavplubix](https://github.com/tavplubix)). * Fix incorrect empty result for query from `Distributed` table if query has `WHERE`, `PREWHERE` and `GLOBAL IN`. Fixes [#15792](https://github.com/ClickHouse/ClickHouse/issues/15792). [#15933](https://github.com/ClickHouse/ClickHouse/pull/15933) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Fix possible deadlocks in RBAC. [#15875](https://github.com/ClickHouse/ClickHouse/pull/15875) ([Vitaly Baranov](https://github.com/vitlibar)). * Fix exception `Block structure mismatch` in `SELECT ... ORDER BY DESC` queries which were executed after `ALTER MODIFY COLUMN` query. Fixes [#15800](https://github.com/ClickHouse/ClickHouse/issues/15800). [#15852](https://github.com/ClickHouse/ClickHouse/pull/15852) ([alesapin](https://github.com/alesapin)). @@ -984,7 +984,7 @@ toc_title: '2020' * Fix rare race condition on server startup when system.logs are enabled. [#15300](https://github.com/ClickHouse/ClickHouse/pull/15300) ([alesapin](https://github.com/alesapin)). * Fix MSan report in QueryLog. Uninitialized memory can be used for the field `memory_usage`. [#15258](https://github.com/ClickHouse/ClickHouse/pull/15258) ([alexey-milovidov](https://github.com/alexey-milovidov)). * Fix instance crash when using joinGet with LowCardinality types. This fixes [#15214](https://github.com/ClickHouse/ClickHouse/issues/15214). [#15220](https://github.com/ClickHouse/ClickHouse/pull/15220) ([Amos Bird](https://github.com/amosbird)). -* Fix bug in table engine `Buffer` which doesn't allow to insert data of new structure into `Buffer` after `ALTER` query.
Fixes [#15117](https://github.com/ClickHouse/ClickHouse/issues/15117). [#15192](https://github.com/ClickHouse/ClickHouse/pull/15192) ([alesapin](https://github.com/alesapin)). +* Fix bug in table engine `Buffer` which does not allow inserting data of a new structure into `Buffer` after an `ALTER` query. Fixes [#15117](https://github.com/ClickHouse/ClickHouse/issues/15117). [#15192](https://github.com/ClickHouse/ClickHouse/pull/15192) ([alesapin](https://github.com/alesapin)). * Adjust decimals field size in mysql column definition packet. [#15152](https://github.com/ClickHouse/ClickHouse/pull/15152) ([maqroll](https://github.com/maqroll)). * We already use padded comparison between String and FixedString (https://github.com/ClickHouse/ClickHouse/blob/master/src/Functions/FunctionsComparison.h#L333). This PR applies the same logic to field comparison which corrects the usage of FixedString as primary keys. This fixes [#14908](https://github.com/ClickHouse/ClickHouse/issues/14908). [#15033](https://github.com/ClickHouse/ClickHouse/pull/15033) ([Amos Bird](https://github.com/amosbird)). * If function `bar` was called with specifically crafted arguments, buffer overflow was possible. This closes [#13926](https://github.com/ClickHouse/ClickHouse/issues/13926). [#15028](https://github.com/ClickHouse/ClickHouse/pull/15028) ([alexey-milovidov](https://github.com/alexey-milovidov)). @@ -1028,7 +1028,7 @@ toc_title: '2020' #### Backward Incompatible Change -* Now `OPTIMIZE FINAL` query doesn't recalculate TTL for parts that were added before TTL was created. Use `ALTER TABLE ... MATERIALIZE TTL` once to calculate them, after that `OPTIMIZE FINAL` will evaluate TTL's properly. This behavior never worked for replicated tables. [#14220](https://github.com/ClickHouse/ClickHouse/pull/14220) ([alesapin](https://github.com/alesapin)). +* Now `OPTIMIZE FINAL` query does not recalculate TTL for parts that were added before TTL was created. Use `ALTER TABLE ... MATERIALIZE TTL` once to calculate them; after that, `OPTIMIZE FINAL` will evaluate TTLs properly. This behavior never worked for replicated tables. [#14220](https://github.com/ClickHouse/ClickHouse/pull/14220) ([alesapin](https://github.com/alesapin)). * Extend `parallel_distributed_insert_select` setting, adding an option to run `INSERT` into local table. The setting changes type from `Bool` to `UInt64`, so the values `false` and `true` are no longer supported. If you have these values in server configuration, the server will not start. Please replace them with `0` and `1`, respectively. [#14060](https://github.com/ClickHouse/ClickHouse/pull/14060) ([Azat Khuzhin](https://github.com/azat)). * Remove support for the `ODBCDriver` input/output format. This was a deprecated format once used for communication with the ClickHouse ODBC driver, now long superseded by the `ODBCDriver2` format. Resolves [#13629](https://github.com/ClickHouse/ClickHouse/issues/13629). [#13847](https://github.com/ClickHouse/ClickHouse/pull/13847) ([hexiaoting](https://github.com/hexiaoting)). * When upgrading from versions older than 20.5, if a rolling update is performed and the cluster contains both versions (20.5 or greater and less than 20.5), and ClickHouse nodes with the old version are restarted while newer versions are already running, it may lead to `Part ... intersects previous part` errors.
To prevent this error, first install newer clickhouse-server packages on all cluster nodes and then do restarts (so, when clickhouse-server is restarted, it will start up with the new version). @@ -1257,7 +1257,7 @@ toc_title: '2020' * Fixed [#10572](https://github.com/ClickHouse/ClickHouse/issues/10572): bloom filter index with const expression. [#12659](https://github.com/ClickHouse/ClickHouse/pull/12659) ([Winter Zhang](https://github.com/zhang2014)). * Fix SIGSEGV in StorageKafka when broker is unavailable (and not only). [#12658](https://github.com/ClickHouse/ClickHouse/pull/12658) ([Azat Khuzhin](https://github.com/azat)). * Add support for function `if` with `Array(UUID)` arguments. This fixes [#11066](https://github.com/ClickHouse/ClickHouse/issues/11066). [#12648](https://github.com/ClickHouse/ClickHouse/pull/12648) ([alexey-milovidov](https://github.com/alexey-milovidov)). -* CREATE USER IF NOT EXISTS now doesn't throw exception if the user exists. This fixes [#12507](https://github.com/ClickHouse/ClickHouse/issues/12507). [#12646](https://github.com/ClickHouse/ClickHouse/pull/12646) ([Vitaly Baranov](https://github.com/vitlibar)). +* CREATE USER IF NOT EXISTS now does not throw an exception if the user exists. This fixes [#12507](https://github.com/ClickHouse/ClickHouse/issues/12507). [#12646](https://github.com/ClickHouse/ClickHouse/pull/12646) ([Vitaly Baranov](https://github.com/vitlibar)). * Exception `There is no supertype...` can be thrown during `ALTER ... UPDATE` in unexpected cases (e.g. when subtracting from UInt64 column). This fixes [#7306](https://github.com/ClickHouse/ClickHouse/issues/7306). This fixes [#4165](https://github.com/ClickHouse/ClickHouse/issues/4165). [#12633](https://github.com/ClickHouse/ClickHouse/pull/12633) ([alexey-milovidov](https://github.com/alexey-milovidov)). * Fix possible `Pipeline stuck` error for queries with external sorting. Fixes [#12617](https://github.com/ClickHouse/ClickHouse/issues/12617). [#12618](https://github.com/ClickHouse/ClickHouse/pull/12618) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Fix error `Output of TreeExecutor is not sorted` for `OPTIMIZE DEDUPLICATE`. Fixes [#11572](https://github.com/ClickHouse/ClickHouse/issues/11572). [#12613](https://github.com/ClickHouse/ClickHouse/pull/12613) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). @@ -1398,7 +1398,7 @@ toc_title: '2020' * Fixed bloom filter index with const expression. This fixes [#10572](https://github.com/ClickHouse/ClickHouse/issues/10572). [#12659](https://github.com/ClickHouse/ClickHouse/pull/12659) ([Winter Zhang](https://github.com/zhang2014)). * Fixed `SIGSEGV` in `StorageKafka` when broker is unavailable (and not only). [#12658](https://github.com/ClickHouse/ClickHouse/pull/12658) ([Azat Khuzhin](https://github.com/azat)). * Added support for function `if` with `Array(UUID)` arguments. This fixes [#11066](https://github.com/ClickHouse/ClickHouse/issues/11066). [#12648](https://github.com/ClickHouse/ClickHouse/pull/12648) ([alexey-milovidov](https://github.com/alexey-milovidov)). -* `CREATE USER IF NOT EXISTS` now doesn't throw exception if the user exists. This fixes [#12507](https://github.com/ClickHouse/ClickHouse/issues/12507). +* `CREATE USER IF NOT EXISTS` now does not throw an exception if the user exists. This fixes [#12507](https://github.com/ClickHouse/ClickHouse/issues/12507).
[#12646](https://github.com/ClickHouse/ClickHouse/pull/12646) ([Vitaly Baranov](https://github.com/vitlibar)). * Better exception message in disk access storage. [#12625](https://github.com/ClickHouse/ClickHouse/pull/12625) ([alesapin](https://github.com/alesapin)). * The function `groupArrayMoving*` was not working for distributed queries. Its result was calculated with an incorrect data type (without promotion to the largest type). The function `groupArrayMovingAvg` was returning an integer number that was inconsistent with the `avg` function. This fixes [#12568](https://github.com/ClickHouse/ClickHouse/issues/12568). [#12622](https://github.com/ClickHouse/ClickHouse/pull/12622) ([alexey-milovidov](https://github.com/alexey-milovidov)). * Fixed lack of aliases with function `any`. [#12593](https://github.com/ClickHouse/ClickHouse/pull/12593) ([Anton Popov](https://github.com/CurtizJ)). @@ -1436,7 +1436,7 @@ toc_title: '2020' * Cap max_memory_usage* limits to the process resident memory. [#12182](https://github.com/ClickHouse/ClickHouse/pull/12182) ([Azat Khuzhin](https://github.com/azat)). * Fix dictGet arguments check during `GROUP BY` injective functions elimination. [#12179](https://github.com/ClickHouse/ClickHouse/pull/12179) ([Azat Khuzhin](https://github.com/azat)). * Fixed the behaviour when `SummingMergeTree` engine sums up columns from partition key. Added an exception in case of explicit definition of columns to sum which intersects with partition key columns. This fixes [#7867](https://github.com/ClickHouse/ClickHouse/issues/7867). [#12173](https://github.com/ClickHouse/ClickHouse/pull/12173) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). -* Don't split the dictionary source's table name into schema and table name itself if ODBC connection doesn't support schema. [#12165](https://github.com/ClickHouse/ClickHouse/pull/12165) ([Vitaly Baranov](https://github.com/vitlibar)). +* Don't split the dictionary source's table name into schema and table name itself if ODBC connection does not support schema. [#12165](https://github.com/ClickHouse/ClickHouse/pull/12165) ([Vitaly Baranov](https://github.com/vitlibar)). * Fixed wrong logic in `ALTER DELETE` that leads to deleting records when the condition evaluates to NULL. This fixes [#9088](https://github.com/ClickHouse/ClickHouse/issues/9088). This closes [#12106](https://github.com/ClickHouse/ClickHouse/issues/12106). [#12153](https://github.com/ClickHouse/ClickHouse/pull/12153) ([alexey-milovidov](https://github.com/alexey-milovidov)). * Fixed transform of query to send to external DBMS (e.g. MySQL, ODBC) in presence of aliases. This fixes [#12032](https://github.com/ClickHouse/ClickHouse/issues/12032). [#12151](https://github.com/ClickHouse/ClickHouse/pull/12151) ([alexey-milovidov](https://github.com/alexey-milovidov)). * Fixed bad code in redundant ORDER BY optimization. The bug was introduced in [#10067](https://github.com/ClickHouse/ClickHouse/issues/10067). [#12148](https://github.com/ClickHouse/ClickHouse/pull/12148) ([alexey-milovidov](https://github.com/alexey-milovidov)). @@ -1467,7 +1467,7 @@ toc_title: '2020' * Added `KILL QUERY [connection_id]` for the MySQL client/driver to cancel the long query, issue [#12038](https://github.com/ClickHouse/ClickHouse/issues/12038). [#12152](https://github.com/ClickHouse/ClickHouse/pull/12152) ([BohuTANG](https://github.com/BohuTANG)). * Added support for `%g` (two digit ISO year) and `%G` (four digit ISO year) substitutions in `formatDateTime` function.
[#12136](https://github.com/ClickHouse/ClickHouse/pull/12136) ([vivarum](https://github.com/vivarum)). * Added 'type' column in system.disks. [#12115](https://github.com/ClickHouse/ClickHouse/pull/12115) ([ianton-ru](https://github.com/ianton-ru)). -* Improved `REVOKE` command: now it requires grant/admin option for only access which will be revoked. For example, to execute `REVOKE ALL ON *.* FROM user1` now it doesn't require to have full access rights granted with grant option. Added command `REVOKE ALL FROM user1` - it revokes all granted roles from `user1`. [#12083](https://github.com/ClickHouse/ClickHouse/pull/12083) ([Vitaly Baranov](https://github.com/vitlibar)). +* Improved `REVOKE` command: now it requires the grant/admin option only for the access which will be revoked. For example, to execute `REVOKE ALL ON *.* FROM user1` it no longer requires full access rights granted with the grant option. Added command `REVOKE ALL FROM user1` - it revokes all granted roles from `user1`. [#12083](https://github.com/ClickHouse/ClickHouse/pull/12083) ([Vitaly Baranov](https://github.com/vitlibar)). * Added replica priority for load_balancing (for manual prioritization of the load balancing). [#11995](https://github.com/ClickHouse/ClickHouse/pull/11995) ([Azat Khuzhin](https://github.com/azat)). * Switched paths in S3 metadata to relative, which allows handling S3 blobs more easily. [#11892](https://github.com/ClickHouse/ClickHouse/pull/11892) ([Vladimir Chebotarev](https://github.com/excitoon)). @@ -1617,7 +1617,7 @@ toc_title: '2020' * Fix wrong result of comparison of FixedString with constant String. This fixes [#11393](https://github.com/ClickHouse/ClickHouse/issues/11393). This bug appeared in version 20.4. [#11828](https://github.com/ClickHouse/ClickHouse/pull/11828) ([alexey-milovidov](https://github.com/alexey-milovidov)). * Fix wrong result for `if` with NULLs in condition. [#11807](https://github.com/ClickHouse/ClickHouse/pull/11807) ([Artem Zuikov](https://github.com/4ertus2)). * Fix using too many threads for queries. [#11788](https://github.com/ClickHouse/ClickHouse/pull/11788) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). -* Fixed `Scalar doesn't exist` exception when using `WITH ...` in `SELECT ... FROM merge_tree_table ...` [#11621](https://github.com/ClickHouse/ClickHouse/issues/11621). [#11767](https://github.com/ClickHouse/ClickHouse/pull/11767) ([Amos Bird](https://github.com/amosbird)). +* Fixed `Scalar does not exist` exception when using `WITH ...` in `SELECT ... FROM merge_tree_table ...` [#11621](https://github.com/ClickHouse/ClickHouse/issues/11621). [#11767](https://github.com/ClickHouse/ClickHouse/pull/11767) ([Amos Bird](https://github.com/amosbird)). * Fix unexpected behaviour of queries like `SELECT *, xyz.*` which succeeded while an error was expected. [#11753](https://github.com/ClickHouse/ClickHouse/pull/11753) ([hexiaoting](https://github.com/hexiaoting)). * Now replicated fetches will be cancelled during metadata alter. [#11744](https://github.com/ClickHouse/ClickHouse/pull/11744) ([alesapin](https://github.com/alesapin)). * Parse metadata stored in zookeeper before checking for equality. [#11739](https://github.com/ClickHouse/ClickHouse/pull/11739) ([Azat Khuzhin](https://github.com/azat)). @@ -1637,7 +1637,7 @@ toc_title: '2020' * Fix wrong exit code of the clickhouse-client, when `exception.code() % 256 == 0`. [#11601](https://github.com/ClickHouse/ClickHouse/pull/11601) ([filimonov](https://github.com/filimonov)).
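A minimal Python sketch (an illustration, not ClickHouse code) of the exit-status truncation behind the clickhouse-client fix above: POSIX keeps only the low 8 bits of a process's exit status, so any nonzero code that is a multiple of 256 is reported as 0 and looks like success to the caller.

```python
import subprocess
import sys

# The child "fails" with code 512, but 512 % 256 == 0, so on POSIX the
# reported exit status is 0 - indistinguishable from a clean exit.
child = subprocess.run([sys.executable, "-c", "raise SystemExit(512)"])
print(child.returncode)  # prints 0 on POSIX systems
```

This is why an exception whose `code() % 256 == 0` could previously make clickhouse-client exit as if the query had succeeded.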
* Fix race conditions in CREATE/DROP of different replicas of ReplicatedMergeTree. Continue to work if the table was not removed completely from ZooKeeper or not created successfully. This fixes [#11432](https://github.com/ClickHouse/ClickHouse/issues/11432). [#11592](https://github.com/ClickHouse/ClickHouse/pull/11592) ([alexey-milovidov](https://github.com/alexey-milovidov)). * Fix trivial error in log message about "Mark cache size was lowered" at server startup. This closes [#11399](https://github.com/ClickHouse/ClickHouse/issues/11399). [#11589](https://github.com/ClickHouse/ClickHouse/pull/11589) ([alexey-milovidov](https://github.com/alexey-milovidov)). -* Fix error `Size of offsets doesn't match size of column` for queries with `PREWHERE column in (subquery)` and `ARRAY JOIN`. [#11580](https://github.com/ClickHouse/ClickHouse/pull/11580) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix error `Size of offsets does not match size of column` for queries with `PREWHERE column in (subquery)` and `ARRAY JOIN`. [#11580](https://github.com/ClickHouse/ClickHouse/pull/11580) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Fixed rare segfault in `SHOW CREATE TABLE`. Fixes [#11490](https://github.com/ClickHouse/ClickHouse/issues/11490). [#11579](https://github.com/ClickHouse/ClickHouse/pull/11579) ([tavplubix](https://github.com/tavplubix)). * All queries in an HTTP session had the same query_id. It is fixed. [#11578](https://github.com/ClickHouse/ClickHouse/pull/11578) ([tavplubix](https://github.com/tavplubix)). * Now clickhouse-server docker container will prefer IPv6 when checking server aliveness. [#11550](https://github.com/ClickHouse/ClickHouse/pull/11550) ([Ivan Starkov](https://github.com/istarkov)). @@ -1758,7 +1758,7 @@ toc_title: '2020' * Multiple names are now allowed in commands: CREATE USER, CREATE ROLE, ALTER USER, SHOW CREATE USER, SHOW GRANTS and so on. [#11670](https://github.com/ClickHouse/ClickHouse/pull/11670) ([Vitaly Baranov](https://github.com/vitlibar)). * Add support for distributed DDL (`UPDATE/DELETE/DROP PARTITION`) on cross replication clusters. [#11508](https://github.com/ClickHouse/ClickHouse/pull/11508) ([frank lee](https://github.com/etah000)). * Clear password from command line in `clickhouse-client` and `clickhouse-benchmark` if the user has specified it with explicit value. This prevents password exposure by `ps` and similar tools. [#11665](https://github.com/ClickHouse/ClickHouse/pull/11665) ([alexey-milovidov](https://github.com/alexey-milovidov)). -* Don't use debug info from ELF file if it doesn't correspond to the running binary. It is needed to avoid printing wrong function names and source locations in stack traces. This fixes [#7514](https://github.com/ClickHouse/ClickHouse/issues/7514). [#11657](https://github.com/ClickHouse/ClickHouse/pull/11657) ([alexey-milovidov](https://github.com/alexey-milovidov)). +* Don't use debug info from ELF file if it does not correspond to the running binary. It is needed to avoid printing wrong function names and source locations in stack traces. This fixes [#7514](https://github.com/ClickHouse/ClickHouse/issues/7514). [#11657](https://github.com/ClickHouse/ClickHouse/pull/11657) ([alexey-milovidov](https://github.com/alexey-milovidov)). * Return NULL/zero when value is not parsed completely in parseDateTimeBestEffortOrNull/Zero functions. This fixes [#7876](https://github.com/ClickHouse/ClickHouse/issues/7876).
[#11653](https://github.com/ClickHouse/ClickHouse/pull/11653) ([alexey-milovidov](https://github.com/alexey-milovidov)). * Skip empty parameters in requested URL. They may appear when you write `http://localhost:8123/?&a=b` or `http://localhost:8123/?a=b&&c=d`. This closes [#10749](https://github.com/ClickHouse/ClickHouse/issues/10749). [#11651](https://github.com/ClickHouse/ClickHouse/pull/11651) ([alexey-milovidov](https://github.com/alexey-milovidov)). * Allow using `groupArrayArray` and `groupUniqArrayArray` as `SimpleAggregateFunction`. [#11650](https://github.com/ClickHouse/ClickHouse/pull/11650) ([Volodymyr Kuznetsov](https://github.com/ksvladimir)). @@ -1933,7 +1933,7 @@ toc_title: '2020' * Fixed logical functions for UInt8 values when they are not equal to 0 or 1. [#12196](https://github.com/ClickHouse/ClickHouse/pull/12196) ([Alexander Kazakov](https://github.com/Akazz)). * Cap max_memory_usage* limits to the process resident memory. [#12182](https://github.com/ClickHouse/ClickHouse/pull/12182) ([Azat Khuzhin](https://github.com/azat)). * Fixed `dictGet` arguments check during GROUP BY injective functions elimination. [#12179](https://github.com/ClickHouse/ClickHouse/pull/12179) ([Azat Khuzhin](https://github.com/azat)). -* Don't split the dictionary source's table name into schema and table name itself if ODBC connection doesn't support schema. [#12165](https://github.com/ClickHouse/ClickHouse/pull/12165) ([Vitaly Baranov](https://github.com/vitlibar)). +* Don't split the dictionary source's table name into schema and table name itself if ODBC connection does not support schema. [#12165](https://github.com/ClickHouse/ClickHouse/pull/12165) ([Vitaly Baranov](https://github.com/vitlibar)). * Fixed wrong logic in `ALTER DELETE` that leads to deleting records when the condition evaluates to NULL. This fixes [#9088](https://github.com/ClickHouse/ClickHouse/issues/9088). This closes [#12106](https://github.com/ClickHouse/ClickHouse/issues/12106). [#12153](https://github.com/ClickHouse/ClickHouse/pull/12153) ([alexey-milovidov](https://github.com/alexey-milovidov)). * Fixed transform of query to send to external DBMS (e.g. MySQL, ODBC) in presence of aliases. This fixes [#12032](https://github.com/ClickHouse/ClickHouse/issues/12032). [#12151](https://github.com/ClickHouse/ClickHouse/pull/12151) ([alexey-milovidov](https://github.com/alexey-milovidov)). * Fixed potential overflow in integer division. This fixes [#12119](https://github.com/ClickHouse/ClickHouse/issues/12119). [#12140](https://github.com/ClickHouse/ClickHouse/pull/12140) ([alexey-milovidov](https://github.com/alexey-milovidov)). @@ -1997,7 +1997,7 @@ toc_title: '2020' * Fix error `Block structure mismatch` for queries with sampling reading from `Buffer` table. [#11602](https://github.com/ClickHouse/ClickHouse/pull/11602) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Fix wrong exit code of the clickhouse-client, when exception.code() % 256 = 0. [#11601](https://github.com/ClickHouse/ClickHouse/pull/11601) ([filimonov](https://github.com/filimonov)). * Fix trivial error in log message about "Mark cache size was lowered" at server startup. This closes [#11399](https://github.com/ClickHouse/ClickHouse/issues/11399). [#11589](https://github.com/ClickHouse/ClickHouse/pull/11589) ([alexey-milovidov](https://github.com/alexey-milovidov)). -* Fix error `Size of offsets doesn't match size of column` for queries with `PREWHERE column in (subquery)` and `ARRAY JOIN`.
[#11580](https://github.com/ClickHouse/ClickHouse/pull/11580) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix error `Size of offsets does not match size of column` for queries with `PREWHERE column in (subquery)` and `ARRAY JOIN`. [#11580](https://github.com/ClickHouse/ClickHouse/pull/11580) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Fixed rare segfault in `SHOW CREATE TABLE`. Fixes [#11490](https://github.com/ClickHouse/ClickHouse/issues/11490). [#11579](https://github.com/ClickHouse/ClickHouse/pull/11579) ([tavplubix](https://github.com/tavplubix)). * All queries in an HTTP session had the same query_id. It is fixed. [#11578](https://github.com/ClickHouse/ClickHouse/pull/11578) ([tavplubix](https://github.com/tavplubix)). * Now clickhouse-server docker container will prefer IPv6 when checking server aliveness. [#11550](https://github.com/ClickHouse/ClickHouse/pull/11550) ([Ivan Starkov](https://github.com/istarkov)). @@ -2213,7 +2213,7 @@ No changes compared to v20.4.3.16-stable. * Fix Distributed-over-Distributed with only one shard in a nested table [#9997](https://github.com/ClickHouse/ClickHouse/pull/9997) ([Azat Khuzhin](https://github.com/azat)) * Fix possible rows loss for queries with `JOIN` and `UNION ALL`. Fixes [#9826](https://github.com/ClickHouse/ClickHouse/issues/9826), [#10113](https://github.com/ClickHouse/ClickHouse/issues/10113). ... [#10099](https://github.com/ClickHouse/ClickHouse/pull/10099) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) * Fix bug in dictionary when local clickhouse server is used as source. It may cause memory corruption if types in dictionary and source are not compatible. [#10071](https://github.com/ClickHouse/ClickHouse/pull/10071) ([alesapin](https://github.com/alesapin)) -* Fixed replicated tables startup when updating from an old ClickHouse version where `/table/replicas/replica_name/metadata` node doesn't exist. Fixes [#10037](https://github.com/ClickHouse/ClickHouse/issues/10037). [#10095](https://github.com/ClickHouse/ClickHouse/pull/10095) ([alesapin](https://github.com/alesapin)) +* Fixed replicated tables startup when updating from an old ClickHouse version where `/table/replicas/replica_name/metadata` node does not exist. Fixes [#10037](https://github.com/ClickHouse/ClickHouse/issues/10037). [#10095](https://github.com/ClickHouse/ClickHouse/pull/10095) ([alesapin](https://github.com/alesapin)) * Fix error `Cannot clone block with columns because block has 0 columns ... While executing GroupingAggregatedTransform`. It happened when setting `distributed_aggregation_memory_efficient` was enabled, and distributed query read aggregating data with mixed single and two-level aggregation from different shards. [#10063](https://github.com/ClickHouse/ClickHouse/pull/10063) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) * Fix deadlock when database with materialized view failed to attach at start [#10054](https://github.com/ClickHouse/ClickHouse/pull/10054) ([Azat Khuzhin](https://github.com/azat)) * Fix a segmentation fault that could occur in GROUP BY over string keys containing trailing zero bytes ([#8636](https://github.com/ClickHouse/ClickHouse/issues/8636), [#8925](https://github.com/ClickHouse/ClickHouse/issues/8925)). ... [#10025](https://github.com/ClickHouse/ClickHouse/pull/10025) ([Alexander Kuzmenkov](https://github.com/akuzm)) @@ -2228,7 +2228,7 @@ No changes compared to v20.4.3.16-stable.
* Fix `TRUNCATE` for Join table engine ([#9917](https://github.com/ClickHouse/ClickHouse/issues/9917)). [#9920](https://github.com/ClickHouse/ClickHouse/pull/9920) ([Amos Bird](https://github.com/amosbird)) * Fix race condition between drop and optimize in `ReplicatedMergeTree`. [#9901](https://github.com/ClickHouse/ClickHouse/pull/9901) ([alesapin](https://github.com/alesapin)) * Fix `DISTINCT` for Distributed when `optimize_skip_unused_shards` is set. [#9808](https://github.com/ClickHouse/ClickHouse/pull/9808) ([Azat Khuzhin](https://github.com/azat)) -* Fix "scalar doesn't exist" error in ALTERs ([#9878](https://github.com/ClickHouse/ClickHouse/issues/9878)). ... [#9904](https://github.com/ClickHouse/ClickHouse/pull/9904) ([Amos Bird](https://github.com/amosbird)) +* Fix "scalar does not exist" error in ALTERs ([#9878](https://github.com/ClickHouse/ClickHouse/issues/9878)). ... [#9904](https://github.com/ClickHouse/ClickHouse/pull/9904) ([Amos Bird](https://github.com/amosbird)) * Fix error with qualified names in `distributed_product_mode='local'`. Fixes [#4756](https://github.com/ClickHouse/ClickHouse/issues/4756) [#9891](https://github.com/ClickHouse/ClickHouse/pull/9891) ([Artem Zuikov](https://github.com/4ertus2)) * For INSERT queries shards now clamp the settings from the initiator to their constraints instead of throwing an exception. This fix allows sending INSERT queries to a shard with different constraints. This change improves fix [#9447](https://github.com/ClickHouse/ClickHouse/issues/9447). [#9852](https://github.com/ClickHouse/ClickHouse/pull/9852) ([Vitaly Baranov](https://github.com/vitlibar)) * Add some retries when committing offsets to Kafka broker, since it can reject commit if during `offsets.commit.timeout.ms` there were not enough replicas available for the `__consumer_offsets` topic [#9884](https://github.com/ClickHouse/ClickHouse/pull/9884) ([filimonov](https://github.com/filimonov)) @@ -2248,11 +2248,11 @@ No changes compared to v20.4.3.16-stable. * Fix possible permanent "Cannot schedule a task" error. [#9154](https://github.com/ClickHouse/ClickHouse/pull/9154) ([Azat Khuzhin](https://github.com/azat)) * Fix bug in backquoting in external dictionaries DDL. Fixes [#9619](https://github.com/ClickHouse/ClickHouse/issues/9619). [#9734](https://github.com/ClickHouse/ClickHouse/pull/9734) ([alesapin](https://github.com/alesapin)) * Fixed data race in `text_log`. It does not correspond to any real bug. [#9726](https://github.com/ClickHouse/ClickHouse/pull/9726) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix bug in a replication that doesn't allow replication to work if the user has executed mutations on the previous version. This fixes [#9645](https://github.com/ClickHouse/ClickHouse/issues/9645). [#9652](https://github.com/ClickHouse/ClickHouse/pull/9652) ([alesapin](https://github.com/alesapin)) +* Fix bug in replication that does not allow replication to work if the user has executed mutations on the previous version. This fixes [#9645](https://github.com/ClickHouse/ClickHouse/issues/9645). [#9652](https://github.com/ClickHouse/ClickHouse/pull/9652) ([alesapin](https://github.com/alesapin)) * Fixed incorrect internal function names for `sumKahan` and `sumWithOverflow`. It led to an exception while using these functions in remote queries.
[#9636](https://github.com/ClickHouse/ClickHouse/pull/9636) ([Azat Khuzhin](https://github.com/azat)) * Add setting `use_compact_format_in_distributed_parts_names` which allows writing files for `INSERT` queries into a `Distributed` table in a more compact format. This fixes [#9647](https://github.com/ClickHouse/ClickHouse/issues/9647). [#9653](https://github.com/ClickHouse/ClickHouse/pull/9653) ([alesapin](https://github.com/alesapin)) * Fix RIGHT and FULL JOIN with LowCardinality in JOIN keys. [#9610](https://github.com/ClickHouse/ClickHouse/pull/9610) ([Artem Zuikov](https://github.com/4ertus2)) -* Fix possible exceptions `Size of filter doesn't match size of column` and `Invalid number of rows in Chunk` in `MergeTreeRangeReader`. They could appear while executing `PREWHERE` in some cases. [#9612](https://github.com/ClickHouse/ClickHouse/pull/9612) ([Anton Popov](https://github.com/CurtizJ)) +* Fix possible exceptions `Size of filter does not match size of column` and `Invalid number of rows in Chunk` in `MergeTreeRangeReader`. They could appear while executing `PREWHERE` in some cases. [#9612](https://github.com/ClickHouse/ClickHouse/pull/9612) ([Anton Popov](https://github.com/CurtizJ)) * Allow `ALTER ON CLUSTER` of Distributed tables with internal replication. This fixes [#3268](https://github.com/ClickHouse/ClickHouse/issues/3268) [#9617](https://github.com/ClickHouse/ClickHouse/pull/9617) ([shinoi2](https://github.com/shinoi2)) * Fix issue when timezone was not preserved if you write a simple arithmetic expression like `time + 1` (in contrast to an expression like `time + INTERVAL 1 SECOND`). This fixes [#5743](https://github.com/ClickHouse/ClickHouse/issues/5743) [#9323](https://github.com/ClickHouse/ClickHouse/pull/9323) ([alexey-milovidov](https://github.com/alexey-milovidov)) @@ -2533,7 +2533,7 @@ No changes compared to v20.4.3.16-stable. * Fix error `Block structure mismatch` for queries with sampling reading from `Buffer` table. [#11602](https://github.com/ClickHouse/ClickHouse/pull/11602) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Fix wrong exit code of the clickhouse-client, when exception.code() % 256 = 0. [#11601](https://github.com/ClickHouse/ClickHouse/pull/11601) ([filimonov](https://github.com/filimonov)). * Fix trivial error in log message about "Mark cache size was lowered" at server startup. This closes [#11399](https://github.com/ClickHouse/ClickHouse/issues/11399). [#11589](https://github.com/ClickHouse/ClickHouse/pull/11589) ([alexey-milovidov](https://github.com/alexey-milovidov)). -* Fix error `Size of offsets doesn't match size of column` for queries with `PREWHERE column in (subquery)` and `ARRAY JOIN`. [#11580](https://github.com/ClickHouse/ClickHouse/pull/11580) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix error `Size of offsets does not match size of column` for queries with `PREWHERE column in (subquery)` and `ARRAY JOIN`. [#11580](https://github.com/ClickHouse/ClickHouse/pull/11580) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * All queries in an HTTP session had the same query_id. It is fixed. [#11578](https://github.com/ClickHouse/ClickHouse/pull/11578) ([tavplubix](https://github.com/tavplubix)). * Now clickhouse-server docker container will prefer IPv6 when checking server aliveness. [#11550](https://github.com/ClickHouse/ClickHouse/pull/11550) ([Ivan Starkov](https://github.com/istarkov)). * Fix shard_num/replica_num for `` (breaks use_compact_format_in_distributed_parts_names).
[#11528](https://github.com/ClickHouse/ClickHouse/pull/11528) ([Azat Khuzhin](https://github.com/azat)). @@ -2692,7 +2692,7 @@ No changes compared to v20.4.3.16-stable. * Fix incorrect `index_granularity_bytes` check while creating a new replica. Fixes [#10098](https://github.com/ClickHouse/ClickHouse/issues/10098). [#10121](https://github.com/ClickHouse/ClickHouse/pull/10121) ([alesapin](https://github.com/alesapin)). * Fix SIGSEGV on INSERT into Distributed table when its structure differs from the underlying tables. [#10105](https://github.com/ClickHouse/ClickHouse/pull/10105) ([Azat Khuzhin](https://github.com/azat)). * Fix possible rows loss for queries with `JOIN` and `UNION ALL`. Fixes [#9826](https://github.com/ClickHouse/ClickHouse/issues/9826), [#10113](https://github.com/ClickHouse/ClickHouse/issues/10113). [#10099](https://github.com/ClickHouse/ClickHouse/pull/10099) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). -* Fixed replicated tables startup when updating from an old ClickHouse version where `/table/replicas/replica_name/metadata` node doesn't exist. Fixes [#10037](https://github.com/ClickHouse/ClickHouse/issues/10037). [#10095](https://github.com/ClickHouse/ClickHouse/pull/10095) ([alesapin](https://github.com/alesapin)). +* Fixed replicated tables startup when updating from an old ClickHouse version where the `/table/replicas/replica_name/metadata` node does not exist (see the sketch after this group of fixes). Fixes [#10037](https://github.com/ClickHouse/ClickHouse/issues/10037). [#10095](https://github.com/ClickHouse/ClickHouse/pull/10095) ([alesapin](https://github.com/alesapin)). * Add some arguments check and support identifier arguments for MySQL Database Engine. [#10077](https://github.com/ClickHouse/ClickHouse/pull/10077) ([Winter Zhang](https://github.com/zhang2014)). * Fix bug in clickhouse dictionary source from localhost clickhouse server. The bug may lead to memory corruption if types in dictionary and source are not compatible. [#10071](https://github.com/ClickHouse/ClickHouse/pull/10071) ([alesapin](https://github.com/alesapin)). * Fix bug in `CHECK TABLE` query when table contains skip indices. [#10068](https://github.com/ClickHouse/ClickHouse/pull/10068) ([alesapin](https://github.com/alesapin)). @@ -2704,7 +2704,7 @@ No changes compared to v20.4.3.16-stable. * Fix a bug with `ON CLUSTER` DDL queries freezing on server startup. [#9927](https://github.com/ClickHouse/ClickHouse/pull/9927) ([Gagan Arneja](https://github.com/garneja)). * Fix parsing multiple hosts set in the CREATE USER command, e.g. `CREATE USER user6 HOST NAME REGEXP 'lo.?*host', NAME REGEXP 'lo*host'`. [#9924](https://github.com/ClickHouse/ClickHouse/pull/9924) ([Vitaly Baranov](https://github.com/vitlibar)). * Fix `TRUNCATE` for Join table engine ([#9917](https://github.com/ClickHouse/ClickHouse/issues/9917)). [#9920](https://github.com/ClickHouse/ClickHouse/pull/9920) ([Amos Bird](https://github.com/amosbird)). -* Fix "scalar doesn't exist" error in ALTERs ([#9878](https://github.com/ClickHouse/ClickHouse/issues/9878)). [#9904](https://github.com/ClickHouse/ClickHouse/pull/9904) ([Amos Bird](https://github.com/amosbird)). +* Fix "scalar does not exist" error in ALTERs ([#9878](https://github.com/ClickHouse/ClickHouse/issues/9878)). [#9904](https://github.com/ClickHouse/ClickHouse/pull/9904) ([Amos Bird](https://github.com/amosbird)). * Fix race condition between drop and optimize in `ReplicatedMergeTree`. [#9901](https://github.com/ClickHouse/ClickHouse/pull/9901) ([alesapin](https://github.com/alesapin)).
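The sketch referenced above, for the replicated-tables startup fix: a hedged illustration of how one might look for the absent `metadata` znode using the third-party `kazoo` client. The ZooKeeper address and table path below are placeholders for illustration only, not values taken from the changelog.

```python
from kazoo.client import KazooClient

# Placeholder ensemble address and ReplicatedMergeTree zookeeper_path;
# substitute the values used by your own tables.
zk = KazooClient(hosts="127.0.0.1:2181")
zk.start()

replicas = "/clickhouse/tables/01/events/replicas"
for replica in zk.get_children(replicas):
    metadata = f"{replicas}/{replica}/metadata"
    # Tables created by old ClickHouse versions may lack this node;
    # its absence is exactly the condition the startup fix tolerates.
    if zk.exists(metadata) is None:
        print(f"missing metadata node: {metadata}")

zk.stop()
```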
* Fix error with qualified names in `distributed_product_mode='local'`. Fixes [#4756](https://github.com/ClickHouse/ClickHouse/issues/4756). [#9891](https://github.com/ClickHouse/ClickHouse/pull/9891) ([Artem Zuikov](https://github.com/4ertus2)). * Fix calculating grants for introspection functions from the setting 'allow_introspection_functions'. [#9840](https://github.com/ClickHouse/ClickHouse/pull/9840) ([Vitaly Baranov](https://github.com/vitlibar)). @@ -2745,7 +2745,7 @@ No changes compared to v20.4.3.16-stable. #### Bug Fix * This release also contains all bug fixes from 20.1.7.38 -* Fix bug in a replication that doesn't allow replication to work if the user has executed mutations on the previous version. This fixes [#9645](https://github.com/ClickHouse/ClickHouse/issues/9645). [#9652](https://github.com/ClickHouse/ClickHouse/pull/9652) ([alesapin](https://github.com/alesapin)). It makes version 20.3 backward compatible again. +* Fix bug in replication that does not allow replication to work if the user has executed mutations on the previous version. This fixes [#9645](https://github.com/ClickHouse/ClickHouse/issues/9645). [#9652](https://github.com/ClickHouse/ClickHouse/pull/9652) ([alesapin](https://github.com/alesapin)). It makes version 20.3 backward compatible again. * Add setting `use_compact_format_in_distributed_parts_names` which allows writing files for `INSERT` queries into a `Distributed` table in a more compact format. This fixes [#9647](https://github.com/ClickHouse/ClickHouse/issues/9647). [#9653](https://github.com/ClickHouse/ClickHouse/pull/9653) ([alesapin](https://github.com/alesapin)). It makes version 20.3 backward compatible again. ### ClickHouse release v20.3.2.1, 2020-03-12 @@ -2760,7 +2760,7 @@ No changes compared to v20.4.3.16-stable. * Remove `findClusterIndex`, `findClusterValue` functions. This fixes [#8641](https://github.com/ClickHouse/ClickHouse/issues/8641). If you were using these functions, send an email to `clickhouse-feedback@yandex-team.com` [#9543](https://github.com/ClickHouse/ClickHouse/pull/9543) ([alexey-milovidov](https://github.com/alexey-milovidov)) * Now it's not allowed to create columns or add columns with `SELECT` subquery as default expression. [#9481](https://github.com/ClickHouse/ClickHouse/pull/9481) ([alesapin](https://github.com/alesapin)) * Require aliases for subqueries in JOIN. [#9274](https://github.com/ClickHouse/ClickHouse/pull/9274) ([Artem Zuikov](https://github.com/4ertus2)) -* Improved `ALTER MODIFY/ADD` queries logic. Now you cannot `ADD` column without type, `MODIFY` default expression doesn't change type of column and `MODIFY` type doesn't loose default expression value. Fixes [#8669](https://github.com/ClickHouse/ClickHouse/issues/8669). [#9227](https://github.com/ClickHouse/ClickHouse/pull/9227) ([alesapin](https://github.com/alesapin)) +* Improved `ALTER MODIFY/ADD` queries logic. Now you cannot `ADD` a column without a type, `MODIFY` default expression does not change the type of a column, and `MODIFY` type does not lose the default expression value. Fixes [#8669](https://github.com/ClickHouse/ClickHouse/issues/8669). [#9227](https://github.com/ClickHouse/ClickHouse/pull/9227) ([alesapin](https://github.com/alesapin)) * Require server to be restarted to apply the changes in logging configuration. This is a temporary workaround to avoid the bug where the server logs to a deleted log file (see [#8696](https://github.com/ClickHouse/ClickHouse/issues/8696)).
[#8707](https://github.com/ClickHouse/ClickHouse/pull/8707) ([Alexander Kuzmenkov](https://github.com/akuzm)) * The setting `experimental_use_processors` is enabled by default. This setting enables usage of the new query pipeline. This is internal refactoring and we expect no visible changes. If you see any issues, set it back to zero. [#8768](https://github.com/ClickHouse/ClickHouse/pull/8768) ([alexey-milovidov](https://github.com/alexey-milovidov)) @@ -2811,7 +2811,7 @@ No changes compared to v20.4.3.16-stable. * Found keys were counted as missed in metrics of cache dictionaries. [#9411](https://github.com/ClickHouse/ClickHouse/pull/9411) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) * Fix replication protocol incompatibility introduced in [#8598](https://github.com/ClickHouse/ClickHouse/issues/8598). [#9412](https://github.com/ClickHouse/ClickHouse/pull/9412) ([alesapin](https://github.com/alesapin)) * Fixed race condition on `queue_task_handle` at the startup of `ReplicatedMergeTree` tables. [#9552](https://github.com/ClickHouse/ClickHouse/pull/9552) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* The token `NOT` didn't work in `SHOW TABLES NOT LIKE` query [#8727](https://github.com/ClickHouse/ClickHouse/issues/8727) [#8940](https://github.com/ClickHouse/ClickHouse/pull/8940) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* The token `NOT` did not work in the `SHOW TABLES NOT LIKE` query. [#8727](https://github.com/ClickHouse/ClickHouse/issues/8727) [#8940](https://github.com/ClickHouse/ClickHouse/pull/8940) ([alexey-milovidov](https://github.com/alexey-milovidov)) * Added range check to function `h3EdgeLengthM`. Without this check, buffer overflow is possible. [#8945](https://github.com/ClickHouse/ClickHouse/pull/8945) ([alexey-milovidov](https://github.com/alexey-milovidov)) * Fixed up a bug in batched calculations of ternary logical OPs on multiple arguments (more than 10). [#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz)) * Fix error of PREWHERE optimization, which could lead to segfaults or `Inconsistent number of columns got from MergeTreeRangeReader` exception. [#9024](https://github.com/ClickHouse/ClickHouse/pull/9024) ([Anton Popov](https://github.com/CurtizJ)) @@ -2872,7 +2872,7 @@ No changes compared to v20.4.3.16-stable. * Fix bug in which a misleading error message was shown when running `SHOW CREATE TABLE a_table_that_does_not_exist`. [#8899](https://github.com/ClickHouse/ClickHouse/pull/8899) ([achulkov2](https://github.com/achulkov2)) * Fixed `Parameters are out of bound` exception in some rare cases when we have a constant in the `SELECT` clause together with an `ORDER BY` and a `LIMIT` clause. [#8892](https://github.com/ClickHouse/ClickHouse/pull/8892) ([Guillaume Tassery](https://github.com/YiuRULE)) * Fix mutations finalization, when an already done mutation can have status `is_done=0`. [#9217](https://github.com/ClickHouse/ClickHouse/pull/9217) ([alesapin](https://github.com/alesapin)) -* Prevent from executing `ALTER ADD INDEX` for MergeTree tables with old syntax, because it doesn't work. [#8822](https://github.com/ClickHouse/ClickHouse/pull/8822) ([Mikhail Korotov](https://github.com/millb)) +* Prevent executing `ALTER ADD INDEX` for MergeTree tables with old syntax, because it does not work.
[#8822](https://github.com/ClickHouse/ClickHouse/pull/8822) ([Mikhail Korotov](https://github.com/millb)) * During server startup, do not access the table which `LIVE VIEW` depends on, so the server will be able to start. Also remove `LIVE VIEW` dependencies when detaching `LIVE VIEW`. `LIVE VIEW` is an experimental feature. [#8824](https://github.com/ClickHouse/ClickHouse/pull/8824) ([tavplubix](https://github.com/tavplubix)) * Fix possible segfault in `MergeTreeRangeReader`, while executing `PREWHERE`. [#9106](https://github.com/ClickHouse/ClickHouse/pull/9106) ([Anton Popov](https://github.com/CurtizJ)) * Fix possible mismatched checksums with column TTLs. [#9451](https://github.com/ClickHouse/ClickHouse/pull/9451) ([Anton Popov](https://github.com/CurtizJ)) @@ -3020,7 +3020,7 @@ No changes compared to v20.4.3.16-stable. #### Bug Fix -* Fix error `Size of offsets doesn't match size of column` for queries with `PREWHERE column in (subquery)` and `ARRAY JOIN`. [#11580](https://github.com/ClickHouse/ClickHouse/pull/11580) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix error `Size of offsets does not match size of column` for queries with `PREWHERE column in (subquery)` and `ARRAY JOIN`. [#11580](https://github.com/ClickHouse/ClickHouse/pull/11580) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). ### ClickHouse release v20.1.13.105-stable 2020-06-10 @@ -3116,7 +3116,7 @@ No changes compared to v20.4.3.16-stable. * Fix `'Not found column in block'` error when `JOIN` appears with `TOTALS`. Fixes [#9839](https://github.com/ClickHouse/ClickHouse/issues/9839). [#9939](https://github.com/ClickHouse/ClickHouse/pull/9939) ([Artem Zuikov](https://github.com/4ertus2)). * Fix a bug with `ON CLUSTER` DDL queries freezing on server startup. [#9927](https://github.com/ClickHouse/ClickHouse/pull/9927) ([Gagan Arneja](https://github.com/garneja)). * Fix `TRUNCATE` for Join table engine ([#9917](https://github.com/ClickHouse/ClickHouse/issues/9917)). [#9920](https://github.com/ClickHouse/ClickHouse/pull/9920) ([Amos Bird](https://github.com/amosbird)). -* Fix `'scalar doesn't exist'` error in ALTER queries ([#9878](https://github.com/ClickHouse/ClickHouse/issues/9878)). [#9904](https://github.com/ClickHouse/ClickHouse/pull/9904) ([Amos Bird](https://github.com/amosbird)). +* Fix `'scalar does not exist'` error in ALTER queries ([#9878](https://github.com/ClickHouse/ClickHouse/issues/9878)). [#9904](https://github.com/ClickHouse/ClickHouse/pull/9904) ([Amos Bird](https://github.com/amosbird)). * Fix race condition between drop and optimize in `ReplicatedMergeTree`. [#9901](https://github.com/ClickHouse/ClickHouse/pull/9901) ([alesapin](https://github.com/alesapin)). * Fixed `DeleteOnDestroy` logic in `ATTACH PART` which could lead to automatic removal of attached part and added a few tests. [#9410](https://github.com/ClickHouse/ClickHouse/pull/9410) ([Vladimir Chebotarev](https://github.com/excitoon)). @@ -3155,7 +3155,7 @@ No changes compared to v20.4.3.16-stable. #### Bug Fix * Fixed incorrect internal function names for `sumKahan` and `sumWithOverflow`. It led to an exception while using these functions in remote queries. [#9636](https://github.com/ClickHouse/ClickHouse/pull/9636) ([Azat Khuzhin](https://github.com/azat)). This issue was in all ClickHouse releases. * Allow `ALTER ON CLUSTER` of `Distributed` tables with internal replication. This fixes [#3268](https://github.com/ClickHouse/ClickHouse/issues/3268).
[#9617](https://github.com/ClickHouse/ClickHouse/pull/9617) ([shinoi2](https://github.com/shinoi2)). This issue was in all ClickHouse releases. -* Fix possible exceptions `Size of filter doesn't match size of column` and `Invalid number of rows in Chunk` in `MergeTreeRangeReader`. They could appear while executing `PREWHERE` in some cases. Fixes [#9132](https://github.com/ClickHouse/ClickHouse/issues/9132). [#9612](https://github.com/ClickHouse/ClickHouse/pull/9612) ([Anton Popov](https://github.com/CurtizJ)) +* Fix possible exceptions `Size of filter does not match size of column` and `Invalid number of rows in Chunk` in `MergeTreeRangeReader`. They could appear while executing `PREWHERE` in some cases. Fixes [#9132](https://github.com/ClickHouse/ClickHouse/issues/9132). [#9612](https://github.com/ClickHouse/ClickHouse/pull/9612) ([Anton Popov](https://github.com/CurtizJ)) * Fixed the issue: timezone was not preserved if you write a simple arithmetic expression like `time + 1` (in contrast to an expression like `time + INTERVAL 1 SECOND`). This fixes [#5743](https://github.com/ClickHouse/ClickHouse/issues/5743). [#9323](https://github.com/ClickHouse/ClickHouse/pull/9323) ([alexey-milovidov](https://github.com/alexey-milovidov)). This issue was in all ClickHouse releases. * Now it's not possible to create or add columns with simple cyclic aliases like `a DEFAULT b, b DEFAULT a`. [#9603](https://github.com/ClickHouse/ClickHouse/pull/9603) ([alesapin](https://github.com/alesapin)) * Fixed the issue when padding at the end of a base64-encoded value can be malformed. Update base64 library. This fixes [#9491](https://github.com/ClickHouse/ClickHouse/issues/9491), closes [#9492](https://github.com/ClickHouse/ClickHouse/issues/9492) [#9500](https://github.com/ClickHouse/ClickHouse/pull/9500) ([alexey-milovidov](https://github.com/alexey-milovidov)) @@ -3199,7 +3199,7 @@ No changes compared to v20.4.3.16-stable. [#9231](https://github.com/ClickHouse/ClickHouse/pull/9231) [(abyss7)](https://github.com/abyss7) * Allow comma join with `IN()` inside. Fixes [#7314](https://github.com/ClickHouse/ClickHouse/issues/7314). [#9251](https://github.com/ClickHouse/ClickHouse/pull/9251) [(4ertus2)](https://github.com/4ertus2) -* Improve `ALTER MODIFY/ADD` queries logic. Now you cannot `ADD` column without type, `MODIFY` default expression doesn't change type of column and `MODIFY` type doesn't loose default expression value. Fixes [#8669](https://github.com/ClickHouse/ClickHouse/issues/8669). +* Improve `ALTER MODIFY/ADD` queries logic. Now you cannot `ADD` a column without a type, `MODIFY` default expression does not change the type of a column, and `MODIFY` type does not lose the default expression value. Fixes [#8669](https://github.com/ClickHouse/ClickHouse/issues/8669). [#9227](https://github.com/ClickHouse/ClickHouse/pull/9227) [(alesapin)](https://github.com/alesapin) * Fix mutations finalization, when an already done mutation can have status is_done=0. [#9217](https://github.com/ClickHouse/ClickHouse/pull/9217) [(alesapin)](https://github.com/alesapin) @@ -3283,7 +3283,7 @@ No changes compared to v20.4.3.16-stable. * Fix error "Mismatch column sizes" when inserting default `Tuple` from `JSONEachRow`. This fixes [#5653](https://github.com/ClickHouse/ClickHouse/issues/5653). [#8606](https://github.com/ClickHouse/ClickHouse/pull/8606) ([tavplubix](https://github.com/tavplubix)) * Now an exception will be thrown in case of using `WITH TIES` alongside `LIMIT BY`. Also add ability to use `TOP` with `LIMIT BY`.
* Fix unintended dependency on a fresh glibc version in the `clickhouse-odbc-bridge` binary. [#8046](https://github.com/ClickHouse/ClickHouse/pull/8046) ([Amos Bird](https://github.com/amosbird))
-* Fix bug in check function of `*MergeTree` engines family. Now it doesn't fail in case when we have equal amount of rows in last granule and last mark (non-final). [#8047](https://github.com/ClickHouse/ClickHouse/pull/8047) ([alesapin](https://github.com/alesapin))
+* Fix bug in the check function of the `*MergeTree` engines family. Now it does not fail when we have an equal number of rows in the last granule and the last mark (non-final). [#8047](https://github.com/ClickHouse/ClickHouse/pull/8047) ([alesapin](https://github.com/alesapin))
* Fix insert into `Enum*` columns after `ALTER` query, when the underlying numeric type is equal to the table-specified type. This fixes [#7836](https://github.com/ClickHouse/ClickHouse/issues/7836). [#7908](https://github.com/ClickHouse/ClickHouse/pull/7908) ([Anton Popov](https://github.com/CurtizJ))
* Allowed non-constant negative "size" argument for function `substring`. It was not allowed by mistake. This fixes [#4832](https://github.com/ClickHouse/ClickHouse/issues/4832). [#7703](https://github.com/ClickHouse/ClickHouse/pull/7703) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Fix parsing bug when a wrong number of arguments is passed to the `(O|J)DBC` table engine. [#7709](https://github.com/ClickHouse/ClickHouse/pull/7709) ([alesapin](https://github.com/alesapin))
@@ -3296,7 +3296,7 @@ No changes compared to v20.4.3.16-stable.

* Fix function `IN` inside `WHERE` statement when a row-level table filter is present. Fixes [#6687](https://github.com/ClickHouse/ClickHouse/issues/6687) [#8357](https://github.com/ClickHouse/ClickHouse/pull/8357) ([Ivan](https://github.com/abyss7))
* Now an exception is thrown if the integral value is not parsed completely for settings values. [#7678](https://github.com/ClickHouse/ClickHouse/pull/7678) ([Mikhail Korotov](https://github.com/millb))
* Fix exception when an aggregate function is used in a query to a distributed table with more than two local shards. [#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu))
-* Now bloom filter can handle zero length arrays and doesn't perform redundant calculations. [#8242](https://github.com/ClickHouse/ClickHouse/pull/8242) ([achimbab](https://github.com/achimbab))
+* Now the bloom filter can handle zero-length arrays and does not perform redundant calculations (see the example below). [#8242](https://github.com/ClickHouse/ClickHouse/pull/8242) ([achimbab](https://github.com/achimbab))
* Fixed checking if a client host is allowed by matching the client host to `host_regexp` specified in `users.xml`. [#8241](https://github.com/ClickHouse/ClickHouse/pull/8241) ([Vitaly Baranov](https://github.com/vitlibar))
* Relax the ambiguous column check that leads to false positives in multiple `JOIN ON` sections. [#8385](https://github.com/ClickHouse/ClickHouse/pull/8385) ([Artem Zuikov](https://github.com/4ertus2))
* Fixed possible server crash (`std::terminate`) when the server cannot send or write data in `JSON` or `XML` format with values of `String` data type (that require `UTF-8` validation) or when compressing result data with the Brotli algorithm or in some other rare cases. This fixes [#7603](https://github.com/ClickHouse/ClickHouse/issues/7603) [#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([alexey-milovidov](https://github.com/alexey-milovidov))
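For the bloom-filter entry above, a self-contained sketch of a skip index over an array column; the table name `bf_demo` is invented, and older releases may additionally require `SET allow_experimental_data_skipping_indices = 1`:

``` sql
CREATE TABLE bf_demo
(
    arr Array(String),
    INDEX arr_bf arr TYPE bloom_filter GRANULARITY 1
)
ENGINE = MergeTree
ORDER BY tuple();

-- A zero-length array used to trigger redundant work in the index:
INSERT INTO bf_demo VALUES ([]);
INSERT INTO bf_demo VALUES (['x']);

SELECT count() FROM bf_demo WHERE has(arr, 'x');
```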
@@ -3323,19 +3323,19 @@ No changes compared to v20.4.3.16-stable.

* Fix error in background merge of columns with `SimpleAggregateFunction(LowCardinality)` type. [#8613](https://github.com/ClickHouse/ClickHouse/pull/8613) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
* Fixed type check in function `toDateTime64`. [#8375](https://github.com/ClickHouse/ClickHouse/pull/8375) ([Vasily Nemkov](https://github.com/Enmk))
* Now the server does not crash on `LEFT` or `FULL JOIN` with a Join engine and unsupported `join_use_nulls` settings. [#8479](https://github.com/ClickHouse/ClickHouse/pull/8479) ([Artem Zuikov](https://github.com/4ertus2))
-* Now `DROP DICTIONARY IF EXISTS db.dict` query doesn't throw exception if `db` doesn't exist. [#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([Vitaly Baranov](https://github.com/vitlibar))
+* Now the `DROP DICTIONARY IF EXISTS db.dict` query does not throw an exception if `db` does not exist. [#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([Vitaly Baranov](https://github.com/vitlibar))
* Fix possible crashes in table functions (`file`, `mysql`, `remote`) caused by usage of a reference to a removed `IStorage` object. Fix incorrect parsing of columns specified at insertion into a table function. [#7762](https://github.com/ClickHouse/ClickHouse/pull/7762) ([tavplubix](https://github.com/tavplubix))
* Ensure the network is up before starting `clickhouse-server`. This fixes [#7507](https://github.com/ClickHouse/ClickHouse/issues/7507). [#8570](https://github.com/ClickHouse/ClickHouse/pull/8570) ([Zhichang Yu](https://github.com/yuzhichang))
-* Fix timeouts handling for secure connections, so queries doesn't hang indefenitely. This fixes [#8126](https://github.com/ClickHouse/ClickHouse/issues/8126). [#8128](https://github.com/ClickHouse/ClickHouse/pull/8128) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Fix timeouts handling for secure connections, so queries do not hang indefinitely. This fixes [#8126](https://github.com/ClickHouse/ClickHouse/issues/8126). [#8128](https://github.com/ClickHouse/ClickHouse/pull/8128) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Fix `clickhouse-copier`'s redundant contention between concurrent workers. [#7816](https://github.com/ClickHouse/ClickHouse/pull/7816) ([Ding Xiang Fei](https://github.com/dingxiangfei2009))
-* Now mutations doesn't skip attached parts, even if their mutation version were larger than current mutation version. [#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([Zhichang Yu](https://github.com/yuzhichang)) [#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([alesapin](https://github.com/alesapin))
+* Now mutations do not skip attached parts, even if their mutation version is larger than the current mutation version. [#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([Zhichang Yu](https://github.com/yuzhichang)) [#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([alesapin](https://github.com/alesapin))
* Ignore redundant copies of `*MergeTree` data parts after a move to another disk and server restart. [#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon))
* Fix crash in `FULL JOIN` with `LowCardinality` in the `JOIN` key (see the example below). [#8252](https://github.com/ClickHouse/ClickHouse/pull/8252) ([Artem Zuikov](https://github.com/4ertus2))
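A minimal reproduction of the `FULL JOIN` crash scenario described above; the table names are invented, a sketch rather than the original test case:

``` sql
CREATE TABLE lhs (k LowCardinality(String), v UInt32) ENGINE = Memory;
CREATE TABLE rhs (k LowCardinality(String), w UInt32) ENGINE = Memory;

INSERT INTO lhs VALUES ('a', 1);
INSERT INTO rhs VALUES ('b', 2);

-- Crashed before the fix when the JOIN key is LowCardinality:
SELECT * FROM lhs FULL JOIN rhs USING (k);
```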
* Forbid using a column name more than once in an insert query like `INSERT INTO tbl (x, y, x)`. This fixes [#5465](https://github.com/ClickHouse/ClickHouse/issues/5465), [#7681](https://github.com/ClickHouse/ClickHouse/issues/7681). [#7685](https://github.com/ClickHouse/ClickHouse/pull/7685) ([alesapin](https://github.com/alesapin))
* Added a fallback for detecting the number of physical CPU cores for unknown CPUs (using the number of logical CPU cores). This fixes [#5239](https://github.com/ClickHouse/ClickHouse/issues/5239). [#7726](https://github.com/ClickHouse/ClickHouse/pull/7726) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Fix `There's no column` error for materialized and alias columns. [#8210](https://github.com/ClickHouse/ClickHouse/pull/8210) ([Artem Zuikov](https://github.com/4ertus2))
* Fixed server crash when an `EXISTS` query was used without a `TABLE` or `DICTIONARY` qualifier, just like `EXISTS t`. This fixes [#8172](https://github.com/ClickHouse/ClickHouse/issues/8172). This bug was introduced in version 19.17. [#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Fix rare bug with error `"Sizes of columns doesn't match"` that might appear when using `SimpleAggregateFunction` column. [#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea))
+* Fix rare bug with error `"Sizes of columns does not match"` that might appear when using a `SimpleAggregateFunction` column. [#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea))
* Fix bug where a user with empty `allow_databases` got access to all databases (and the same for `allow_dictionaries`). [#7793](https://github.com/ClickHouse/ClickHouse/pull/7793) ([DeifyTheGod](https://github.com/DeifyTheGod))
* Fix client crash when the server has already disconnected from the client. [#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat))
* Fix `ORDER BY` behaviour in case of sorting by a primary key prefix and a non-primary-key suffix. [#7759](https://github.com/ClickHouse/ClickHouse/pull/7759) ([Anton Popov](https://github.com/CurtizJ))
@@ -3367,7 +3367,7 @@ No changes compared to v20.4.3.16-stable.

* Fix `CHECK TABLE` query for `*MergeTree` tables without a key. Fixes [#7543](https://github.com/ClickHouse/ClickHouse/issues/7543). [#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([alesapin](https://github.com/alesapin))
* Fixed conversion of `Float64` to MySQL type. [#8079](https://github.com/ClickHouse/ClickHouse/pull/8079) ([Yuriy Baranov](https://github.com/yurriy))
* Now if a table was not completely dropped because of a server crash, the server will try to restore and load it. [#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix))
-* Fixed crash in table function `file` while inserting into file that doesn't exist. Now in this case file would be created and then insert would be processed. [#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia))
+* Fixed crash in table function `file` while inserting into a file that does not exist. Now in this case the file would be created and then the insert would be processed (see the example below). [#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia))
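For the `file` table function entry above, a hedged sketch; the path and column structure are invented, and the file is resolved relative to the server's `user_files_path`:

``` sql
-- If data.tsv does not exist yet, it is now created
-- instead of crashing the query.
INSERT INTO TABLE FUNCTION file('data.tsv', 'TSV', 'x UInt32, s String')
VALUES (1, 'a'), (2, 'b');
```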
* Fix rare deadlock which can happen when `trace_log` is enabled. [#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov))
* Add ability to work with different types besides `Date` in `RangeHashed` external dictionary created from DDL query. Fixes [#7899](https://github.com/ClickHouse/ClickHouse/issues/7899). [#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([alesapin](https://github.com/alesapin))
* Fixes crash when `now64()` is called with the result of another function. [#8270](https://github.com/ClickHouse/ClickHouse/pull/8270) ([Vasily Nemkov](https://github.com/Enmk))
@@ -3465,7 +3465,7 @@ No changes compared to v20.4.3.16-stable.

* Add performance tests for short string optimized hash tables. [#7679](https://github.com/ClickHouse/ClickHouse/pull/7679) ([Amos Bird](https://github.com/amosbird))
* Now ClickHouse will build on `AArch64` even if `MADV_FREE` is not available. This fixes [#8027](https://github.com/ClickHouse/ClickHouse/issues/8027). [#8243](https://github.com/ClickHouse/ClickHouse/pull/8243) ([Amos Bird](https://github.com/amosbird))
* Update `zlib-ng` to fix memory sanitizer problems. [#7182](https://github.com/ClickHouse/ClickHouse/pull/7182) [#8206](https://github.com/ClickHouse/ClickHouse/pull/8206) ([Alexander Kuzmenkov](https://github.com/akuzm))
-* Enable internal MySQL library on non-Linux system, because usage of OS packages is very fragile and usually doesn't work at all. This fixes [#5765](https://github.com/ClickHouse/ClickHouse/issues/5765). [#8426](https://github.com/ClickHouse/ClickHouse/pull/8426) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Enable the internal MySQL library on non-Linux systems, because usage of OS packages is very fragile and usually does not work at all. This fixes [#5765](https://github.com/ClickHouse/ClickHouse/issues/5765). [#8426](https://github.com/ClickHouse/ClickHouse/pull/8426) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Fixed build on some systems after enabling `libc++`. This supersedes [#8374](https://github.com/ClickHouse/ClickHouse/issues/8374). [#8380](https://github.com/ClickHouse/ClickHouse/pull/8380) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Make `Field` methods more type-safe to find more errors. [#7386](https://github.com/ClickHouse/ClickHouse/pull/7386) [#8209](https://github.com/ClickHouse/ClickHouse/pull/8209) ([Alexander Kuzmenkov](https://github.com/akuzm))
* Added missing files to the `libc-headers` submodule. [#8507](https://github.com/ClickHouse/ClickHouse/pull/8507) ([alexey-milovidov](https://github.com/alexey-milovidov))
@@ -3494,7 +3494,7 @@ No changes compared to v20.4.3.16-stable.

* Fix the mode of the default password file for `.deb` Linux distros. [#8075](https://github.com/ClickHouse/ClickHouse/pull/8075) ([proller](https://github.com/proller))
* Improved expression for getting `clickhouse-server` PID in `clickhouse-test`. [#8063](https://github.com/ClickHouse/ClickHouse/pull/8063) ([Alexander Kazakov](https://github.com/Akazz))
* Updated contrib/googletest to v1.10.0. [#8587](https://github.com/ClickHouse/ClickHouse/pull/8587) ([Alexander Burmak](https://github.com/Alex-Burmak))
-* Fixed ThreadSaninitizer report in `base64` library. Also updated this library to the latest version, but it doesn't matter. This fixes [#8397](https://github.com/ClickHouse/ClickHouse/issues/8397).
[#8403](https://github.com/ClickHouse/ClickHouse/pull/8403) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Fixed ThreadSanitizer report in `base64` library. Also updated this library to the latest version, but it does not matter. This fixes [#8397](https://github.com/ClickHouse/ClickHouse/issues/8397). [#8403](https://github.com/ClickHouse/ClickHouse/pull/8403) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Fix `00600_replace_running_query` for processors. [#8272](https://github.com/ClickHouse/ClickHouse/pull/8272) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
* Remove support for `tcmalloc` to make `CMakeLists.txt` simpler. [#8310](https://github.com/ClickHouse/ClickHouse/pull/8310) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Release gcc builds now use `libc++` instead of `libstdc++`. Recently `libc++` was used only with clang. This will improve the consistency of build configurations and portability. [#8311](https://github.com/ClickHouse/ClickHouse/pull/8311) ([alexey-milovidov](https://github.com/alexey-milovidov))
diff --git a/docs/es/commercial/cloud.md b/docs/es/commercial/cloud.md
deleted file mode 100644
index bc593a82ad7..00000000000
--- a/docs/es/commercial/cloud.md
+++ /dev/null
@@ -1,23 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 1
-toc_title: Nube
----
-
-# Proveedores de servicios en la nube de ClickHouse {#clickhouse-cloud-service-providers}
-
-!!! info "INFO"
-    Si ha lanzado una nube pública con el servicio ClickHouse administrado, no dude en [abrir una solicitud de extracción](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/cloud.md) añadiéndolo a la siguiente lista.
-
-## Nube de Yandex {#yandex-cloud}
-
-[Servicio administrado de Yandex para ClickHouse](https://cloud.yandex.com/services/managed-clickhouse?utm_source=referrals&utm_medium=clickhouseofficialsite&utm_campaign=link3) proporciona las siguientes características clave:
-
-- Servicio ZooKeeper totalmente gestionado para [Replicación de ClickHouse](../engines/table-engines/mergetree-family/replication.md)
-- Múltiples opciones de tipo de almacenamiento
-- Réplicas en diferentes zonas de disponibilidad
-- Cifrado y aislamiento
-- Mantenimiento automatizado
-
-{## [Artículo Original](https://clickhouse.tech/docs/en/commercial/cloud/) ##}
diff --git a/docs/es/commercial/index.md b/docs/es/commercial/index.md
deleted file mode 100644
index b367631ae1c..00000000000
--- a/docs/es/commercial/index.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_folder_title: Comercial
-toc_priority: 70
-toc_title: Comercial
----
-
-
diff --git a/docs/es/commercial/support.md b/docs/es/commercial/support.md
deleted file mode 100644
index a817d90dcb5..00000000000
--- a/docs/es/commercial/support.md
+++ /dev/null
@@ -1,23 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 3
-toc_title: Apoyo
----
-
-# Proveedores de servicios de soporte comercial ClickHouse {#clickhouse-commercial-support-service-providers}
-
-!!! info "INFO"
-    Si ha lanzado un servicio de soporte comercial ClickHouse, no dude en [abrir una solicitud de extracción](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/support.md) añadiéndolo a la siguiente lista.
- -## Altinidad {#altinity} - -Altinity ha ofrecido soporte y servicios empresariales ClickHouse desde 2017. Los clientes de Altinity van desde empresas Fortune 100 hasta startups. Visitar [Más información](https://www.altinity.com/) para más información. - -## Mafiree {#mafiree} - -[Descripción del servicio](http://mafiree.com/clickhouse-analytics-services.php) - -## MinervaDB {#minervadb} - -[Descripción del servicio](https://minervadb.com/index.php/clickhouse-consulting-and-support-by-minervadb/) diff --git a/docs/es/development/architecture.md b/docs/es/development/architecture.md deleted file mode 100644 index 1620a58a3a0..00000000000 --- a/docs/es/development/architecture.md +++ /dev/null @@ -1,203 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 62 -toc_title: "Descripci\xF3n general de la arquitectura ClickHouse" ---- - -# Descripción general de la arquitectura ClickHouse {#overview-of-clickhouse-architecture} - -ClickHouse es un verdadero DBMS orientado a columnas. Los datos se almacenan por columnas y durante la ejecución de matrices (vectores o fragmentos de columnas). Siempre que sea posible, las operaciones se envían en matrices, en lugar de en valores individuales. Se llama “vectorized query execution,” y ayuda a reducir el costo del procesamiento de datos real. - -> Esta idea no es nada nuevo. Se remonta a la `APL` lenguaje de programación y sus descendientes: `A +`, `J`, `K`, y `Q`. La programación de matrices se utiliza en el procesamiento de datos científicos. Tampoco es esta idea algo nuevo en las bases de datos relacionales: por ejemplo, se usa en el `Vectorwise` sistema. - -Existen dos enfoques diferentes para acelerar el procesamiento de consultas: la ejecución de consultas vectorizadas y la generación de código en tiempo de ejecución. Este último elimina toda la indirección y el despacho dinámico. Ninguno de estos enfoques es estrictamente mejor que el otro. La generación de código de tiempo de ejecución puede ser mejor cuando fusiona muchas operaciones, utilizando así las unidades de ejecución de la CPU y la canalización. La ejecución de consultas vectorizadas puede ser menos práctica porque implica vectores temporales que deben escribirse en la memoria caché y leerse. Si los datos temporales no caben en la memoria caché L2, esto se convierte en un problema. Pero la ejecución de consultas vectorizadas utiliza más fácilmente las capacidades SIMD de la CPU. Un [documento de investigación](http://15721.courses.cs.cmu.edu/spring2016/papers/p5-sompolski.pdf) escrito por nuestros amigos muestra que es mejor combinar ambos enfoques. ClickHouse utiliza la ejecución de consultas vectorizadas y tiene un soporte inicial limitado para la generación de código en tiempo de ejecución. - -## Columna {#columns} - -`IColumn` interfaz se utiliza para representar columnas en la memoria (en realidad, fragmentos de columnas). Esta interfaz proporciona métodos auxiliares para la implementación de varios operadores relacionales. Casi todas las operaciones son inmutables: no modifican la columna original, sino que crean una nueva modificada. Por ejemplo, el `IColumn :: filter` método acepta una máscara de bytes de filtro. Se utiliza para el `WHERE` y `HAVING` operadores relacionales. Ejemplos adicionales: el `IColumn :: permute` para apoyar `ORDER BY`, el `IColumn :: cut` para apoyar `LIMIT`. 
- -Diversos `IColumn` aplicación (`ColumnUInt8`, `ColumnString`, y así sucesivamente) son responsables del diseño de memoria de las columnas. El diseño de memoria suele ser una matriz contigua. Para el tipo entero de columnas, es solo una matriz contigua, como `std :: vector`. Para `String` y `Array` columnas, son dos vectores: uno para todos los elementos de la matriz, colocados contiguamente, y un segundo para los desplazamientos al comienzo de cada matriz. También hay `ColumnConst` que almacena solo un valor en la memoria, pero parece una columna. - -## Campo {#field} - -Sin embargo, también es posible trabajar con valores individuales. Para representar un valor individual, el `Field` se utiliza. `Field` es sólo una unión discriminada de `UInt64`, `Int64`, `Float64`, `String` y `Array`. `IColumn` tiene el `operator[]` para obtener el valor n-ésimo como un `Field` y el `insert` método para agregar un `Field` al final de una columna. Estos métodos no son muy eficientes, ya que requieren tratar con temporal `Field` objetos que representan un valor individual. Hay métodos más eficientes, tales como `insertFrom`, `insertRangeFrom` y así sucesivamente. - -`Field` no tiene suficiente información sobre un tipo de datos específico para una tabla. Por ejemplo, `UInt8`, `UInt16`, `UInt32`, y `UInt64` todos están representados como `UInt64` en una `Field`. - -## Abstracciones con fugas {#leaky-abstractions} - -`IColumn` tiene métodos para transformaciones relacionales comunes de datos, pero no satisfacen todas las necesidades. Por ejemplo, `ColumnUInt64` no tiene un método para calcular la suma de dos columnas, y `ColumnString` no tiene un método para ejecutar una búsqueda de subcadena. Estas innumerables rutinas se implementan fuera de `IColumn`. - -Varias funciones en columnas se pueden implementar de una manera genérica, no eficiente utilizando `IColumn` para extraer `Field` valores, o de una manera especializada utilizando el conocimiento del diseño de la memoria interna de los datos en un `IColumn` aplicación. Se implementa mediante la conversión de funciones a un `IColumn` escriba y trate con la representación interna directamente. Por ejemplo, `ColumnUInt64` tiene el `getData` método que devuelve una referencia a una matriz interna, luego una rutina separada lee o llena esa matriz directamente. Tenemos “leaky abstractions” para permitir especializaciones eficientes de varias rutinas. - -## Tipos de datos {#data_types} - -`IDataType` es responsable de la serialización y deserialización: para leer y escribir fragmentos de columnas o valores individuales en formato binario o de texto. `IDataType` corresponde directamente a los tipos de datos en las tablas. Por ejemplo, hay `DataTypeUInt32`, `DataTypeDateTime`, `DataTypeString` y así sucesivamente. - -`IDataType` y `IColumn` están vagamente relacionados entre sí. Diferentes tipos de datos se pueden representar en la memoria por el mismo `IColumn` aplicación. Por ejemplo, `DataTypeUInt32` y `DataTypeDateTime` están representados por `ColumnUInt32` o `ColumnConstUInt32`. Además, el mismo tipo de datos se puede representar mediante `IColumn` aplicación. Por ejemplo, `DataTypeUInt8` puede ser representado por `ColumnUInt8` o `ColumnConstUInt8`. - -`IDataType` sólo almacena metadatos. Por ejemplo, `DataTypeUInt8` no almacena nada en absoluto (excepto vptr) y `DataTypeFixedString` tiendas solo `N` (el tamaño de las cadenas de tamaño fijo). - -`IDataType` tiene métodos auxiliares para varios formatos de datos. 
Los ejemplos son métodos para serializar un valor con posibles citas, para serializar un valor para JSON y para serializar un valor como parte del formato XML. No hay correspondencia directa con los formatos de datos. Por ejemplo, los diferentes formatos de datos `Pretty` y `TabSeparated` puede utilizar el mismo `serializeTextEscaped` método de ayuda de la `IDataType` interfaz. - -## Bloque {#block} - -A `Block` es un contenedor que representa un subconjunto (porción) de una tabla en la memoria. Es sólo un conjunto de triples: `(IColumn, IDataType, column name)`. Durante la ejecución de la consulta, los datos son procesados por `Block`s. Si tenemos un `Block`, tenemos datos (en el `IColumn` objeto), tenemos información sobre su tipo (en `IDataType`) que nos dice cómo lidiar con esa columna, y tenemos el nombre de la columna. Podría ser el nombre de columna original de la tabla o algún nombre artificial asignado para obtener resultados temporales de los cálculos. - -Cuando calculamos alguna función sobre columnas en un bloque, agregamos otra columna con su resultado al bloque, y no tocamos columnas para argumentos de la función porque las operaciones son inmutables. Más tarde, las columnas innecesarias se pueden eliminar del bloque, pero no se pueden modificar. Es conveniente para la eliminación de subexpresiones comunes. - -Se crean bloques para cada fragmento de datos procesado. Tenga en cuenta que para el mismo tipo de cálculo, los nombres y tipos de columna siguen siendo los mismos para diferentes bloques y solo cambian los datos de columna. Es mejor dividir los datos del bloque desde el encabezado del bloque porque los tamaños de bloque pequeños tienen una gran sobrecarga de cadenas temporales para copiar shared_ptrs y nombres de columna. - -## Bloquear flujos {#block-streams} - -Los flujos de bloques son para procesar datos. Usamos flujos de bloques para leer datos de algún lugar, realizar transformaciones de datos o escribir datos en algún lugar. `IBlockInputStream` tiene el `read` método para buscar el siguiente bloque mientras esté disponible. `IBlockOutputStream` tiene el `write` método para empujar el bloque en alguna parte. - -Los flujos son responsables de: - -1. Leer o escribir en una mesa. La tabla solo devuelve una secuencia para leer o escribir bloques. -2. Implementación de formatos de datos. Por ejemplo, si desea enviar datos a un terminal en `Pretty` formato, crea un flujo de salida de bloque donde presiona bloques y los formatea. -3. Realización de transformaciones de datos. Digamos que tienes `IBlockInputStream` y desea crear una secuencia filtrada. Usted crea `FilterBlockInputStream` e inicializarlo con su transmisión. Luego, cuando tiras de un bloque de `FilterBlockInputStream`, extrae un bloque de su flujo, lo filtra y le devuelve el bloque filtrado. Las canalizaciones de ejecución de consultas se representan de esta manera. - -Hay transformaciones más sofisticadas. Por ejemplo, cuando tiras de `AggregatingBlockInputStream`, lee todos los datos de su origen, los agrega y, a continuación, devuelve un flujo de datos agregados para usted. Otro ejemplo: `UnionBlockInputStream` acepta muchas fuentes de entrada en el constructor y también una serie de subprocesos. Lanza múltiples hilos y lee de múltiples fuentes en paralelo. - -> Las secuencias de bloques usan el “pull” enfoque para controlar el flujo: cuando extrae un bloque de la primera secuencia, en consecuencia extrae los bloques requeridos de las secuencias anidadas, y toda la tubería de ejecución funcionará. 
Ni “pull” ni “push” es la mejor solución, porque el flujo de control está implícito y eso limita la implementación de varias características, como la ejecución simultánea de múltiples consultas (fusionando muchas tuberías). Esta limitación podría superarse con coroutines o simplemente ejecutando hilos adicionales que se esperan el uno al otro. Podemos tener más posibilidades si hacemos explícito el flujo de control: si localizamos la lógica para pasar datos de una unidad de cálculo a otra fuera de esas unidades de cálculo. Lea esto [artículo](http://journal.stuffwithstuff.com/2013/01/13/iteration-inside-and-out/) para más pensamientos. - -Debemos tener en cuenta que la canalización de ejecución de consultas crea datos temporales en cada paso. Tratamos de mantener el tamaño del bloque lo suficientemente pequeño para que los datos temporales se ajusten a la memoria caché de la CPU. Con esa suposición, escribir y leer datos temporales es casi gratis en comparación con otros cálculos. Podríamos considerar una alternativa, que es fusionar muchas operaciones en la tubería. Podría hacer que la tubería sea lo más corta posible y eliminar gran parte de los datos temporales, lo que podría ser una ventaja, pero también tiene inconvenientes. Por ejemplo, una canalización dividida facilita la implementación de almacenamiento en caché de datos intermedios, el robo de datos intermedios de consultas similares que se ejecutan al mismo tiempo y la fusión de canalizaciones para consultas similares. - -## Formato {#formats} - -Los formatos de datos se implementan con flujos de bloques. Hay “presentational” sólo es adecuado para la salida de datos al cliente, tales como `Pretty` formato, que proporciona sólo `IBlockOutputStream`. Y hay formatos de entrada / salida, como `TabSeparated` o `JSONEachRow`. - -También hay secuencias de filas: `IRowInputStream` y `IRowOutputStream`. Permiten pull/push datos por filas individuales, no por bloques. Y solo son necesarios para simplificar la implementación de formatos orientados a filas. Envoltura `BlockInputStreamFromRowInputStream` y `BlockOutputStreamFromRowOutputStream` le permite convertir flujos orientados a filas en flujos regulares orientados a bloques. - -## I/O {#io} - -Para la entrada / salida orientada a bytes, hay `ReadBuffer` y `WriteBuffer` clases abstractas. Se usan en lugar de C ++ `iostream`s. No se preocupe: cada proyecto maduro de C ++ está usando algo más que `iostream`s por buenas razones. - -`ReadBuffer` y `WriteBuffer` son solo un búfer contiguo y un cursor apuntando a la posición en ese búfer. Las implementaciones pueden poseer o no la memoria del búfer. Hay un método virtual para llenar el búfer con los siguientes datos (para `ReadBuffer`) o para vaciar el búfer en algún lugar (para `WriteBuffer`). Los métodos virtuales rara vez se llaman. - -Implementaciones de `ReadBuffer`/`WriteBuffer` se utilizan para trabajar con archivos y descriptores de archivos y sockets de red, para implementar la compresión (`CompressedWriteBuffer` is initialized with another WriteBuffer and performs compression before writing data to it), and for other purposes – the names `ConcatReadBuffer`, `LimitReadBuffer`, y `HashingWriteBuffer` hablar por sí mismos. - -Read / WriteBuffers solo se ocupan de bytes. Hay funciones de `ReadHelpers` y `WriteHelpers` archivos de encabezado para ayudar con el formato de entrada / salida. Por ejemplo, hay ayudantes para escribir un número en formato decimal. 
- -Veamos qué sucede cuando quieres escribir un conjunto de resultados en `JSON` formato a stdout. Tiene un conjunto de resultados listo para ser recuperado de `IBlockInputStream`. Usted crea `WriteBufferFromFileDescriptor(STDOUT_FILENO)` para escribir bytes en stdout. Usted crea `JSONRowOutputStream`, inicializado con eso `WriteBuffer` para escribir filas en `JSON` a stdout. Usted crea `BlockOutputStreamFromRowOutputStream` encima de él, para representarlo como `IBlockOutputStream`. Entonces usted llama `copyData` para transferir datos desde `IBlockInputStream` a `IBlockOutputStream` y todo funciona. Internamente, `JSONRowOutputStream` escribirá varios delimitadores JSON y llamará al `IDataType::serializeTextJSON` con una referencia a `IColumn` y el número de fila como argumentos. Consecuentemente, `IDataType::serializeTextJSON` llamará a un método de `WriteHelpers.h`: por ejemplo, `writeText` para tipos numéricos y `writeJSONString` para `DataTypeString`. - -## Tabla {#tables} - -El `IStorage` interfaz representa tablas. Las diferentes implementaciones de esa interfaz son diferentes motores de tabla. Los ejemplos son `StorageMergeTree`, `StorageMemory` y así sucesivamente. Las instancias de estas clases son solo tablas. - -Clave `IStorage` son `read` y `write`. También hay `alter`, `rename`, `drop` y así sucesivamente. El `read` método acepta los siguientes argumentos: el conjunto de columnas para leer de una tabla, el `AST` consulta a considerar, y el número deseado de flujos para devolver. Devuelve uno o varios `IBlockInputStream` objetos e información sobre la etapa de procesamiento de datos que se completó dentro de un motor de tablas durante la ejecución de la consulta. - -En la mayoría de los casos, el método de lectura solo es responsable de leer las columnas especificadas de una tabla, no de ningún procesamiento de datos adicional. Todo el procesamiento de datos adicional es realizado por el intérprete de consultas y está fuera de la responsabilidad de `IStorage`. - -Pero hay excepciones notables: - -- La consulta AST se pasa al `read` método, y el motor de tablas puede usarlo para derivar el uso del índice y leer menos datos de una tabla. -- A veces, el motor de tablas puede procesar los datos a una etapa específica. Por ejemplo, `StorageDistributed` puede enviar una consulta a servidores remotos, pedirles que procesen datos a una etapa donde se puedan fusionar datos de diferentes servidores remotos y devolver esos datos preprocesados. El intérprete de consultas termina de procesar los datos. - -Tabla `read` método puede devolver múltiples `IBlockInputStream` objetos para permitir el procesamiento de datos en paralelo. Estos flujos de entrada de bloques múltiples pueden leer de una tabla en paralelo. A continuación, puede ajustar estas secuencias con varias transformaciones (como la evaluación de expresiones o el filtrado) que se pueden calcular de forma independiente y crear un `UnionBlockInputStream` encima de ellos, para leer desde múltiples flujos en paralelo. - -También hay `TableFunction`s. Estas son funciones que devuelven un `IStorage` objeto a utilizar en el `FROM` cláusula de una consulta. - -Para tener una idea rápida de cómo implementar su motor de tabla, vea algo simple, como `StorageMemory` o `StorageTinyLog`. - -> Como resultado de la `read` método, `IStorage` devoluciones `QueryProcessingStage` – information about what parts of the query were already calculated inside storage. 
- -## Analizador {#parsers} - -Un analizador de descenso recursivo escrito a mano analiza una consulta. Por ejemplo, `ParserSelectQuery` simplemente llama recursivamente a los analizadores subyacentes para varias partes de la consulta. Los analizadores crean un `AST`. El `AST` está representado por nodos, que son instancias de `IAST`. - -> Los generadores de analizadores no se utilizan por razones históricas. - -## Interprete {#interpreters} - -Los intérpretes son responsables de crear la canalización de ejecución de consultas `AST`. Hay intérpretes simples, como `InterpreterExistsQuery` y `InterpreterDropQuery` o el más sofisticado `InterpreterSelectQuery`. La canalización de ejecución de consultas es una combinación de flujos de entrada o salida de bloques. Por ejemplo, el resultado de interpretar el `SELECT` la consulta es la `IBlockInputStream` para leer el conjunto de resultados; el resultado de la consulta INSERT es el `IBlockOutputStream` para escribir datos para su inserción, y el resultado de interpretar el `INSERT SELECT` la consulta es la `IBlockInputStream` que devuelve un conjunto de resultados vacío en la primera lectura, pero que copia datos de `SELECT` a `INSERT` al mismo tiempo. - -`InterpreterSelectQuery` utilizar `ExpressionAnalyzer` y `ExpressionActions` maquinaria para el análisis de consultas y transformaciones. Aquí es donde se realizan la mayoría de las optimizaciones de consultas basadas en reglas. `ExpressionAnalyzer` es bastante complicado y debe reescribirse: se deben extraer varias transformaciones de consultas y optimizaciones para separar clases para permitir transformaciones modulares o consultas. - -## Función {#functions} - -Hay funciones ordinarias y funciones agregadas. Para las funciones agregadas, consulte la siguiente sección. - -Ordinary functions don't change the number of rows – they work as if they are processing each row independently. In fact, functions are not called for individual rows, but for `Block`de datos para implementar la ejecución de consultas vectorizadas. - -Hay algunas funciones diversas, como [BlockSize](../sql-reference/functions/other-functions.md#function-blocksize), [rowNumberInBlock](../sql-reference/functions/other-functions.md#function-rownumberinblock), y [runningAccumulate](../sql-reference/functions/other-functions.md#function-runningaccumulate), que explotan el procesamiento de bloques y violan la independencia de las filas. - -ClickHouse tiene una tipificación fuerte, por lo que no hay conversión de tipo implícita. Si una función no admite una combinación específica de tipos, produce una excepción. Pero las funciones pueden funcionar (estar sobrecargadas) para muchas combinaciones diferentes de tipos. Por ejemplo, el `plus` función (para implementar el `+` operador) funciona para cualquier combinación de tipos numéricos: `UInt8` + `Float32`, `UInt16` + `Int8` y así sucesivamente. Además, algunas funciones variadas pueden aceptar cualquier número de argumentos, como el `concat` función. - -Implementar una función puede ser un poco inconveniente porque una función distribuye explícitamente tipos de datos compatibles y `IColumns`. Por ejemplo, el `plus` La función tiene código generado por la creación de instancias de una plantilla de C ++ para cada combinación de tipos numéricos y argumentos izquierdo y derecho constantes o no constantes. - -Es un excelente lugar para implementar la generación de código en tiempo de ejecución para evitar la hinchazón del código de plantilla. 
Además, permite agregar funciones fusionadas como multiplicar-agregar fusionado o hacer comparaciones múltiples en una iteración de bucle. - -Debido a la ejecución de consultas vectorizadas, las funciones no se cortocircuitan. Por ejemplo, si escribe `WHERE f(x) AND g(y)`, ambos lados se calculan, incluso para las filas, cuando `f(x)` es cero (excepto cuando `f(x)` es una expresión constante cero). Pero si la selectividad del `f(x)` la condición es alta, y el cálculo de `f(x)` es mucho más barato que `g(y)`, es mejor implementar el cálculo de paso múltiple. Primero calcularía `f(x)`, a continuación, filtrar columnas por el resultado, y luego calcular `g(y)` solo para trozos de datos más pequeños y filtrados. - -## Funciones agregadas {#aggregate-functions} - -Las funciones agregadas son funciones con estado. Acumulan valores pasados en algún estado y le permiten obtener resultados de ese estado. Se gestionan con el `IAggregateFunction` interfaz. Los estados pueden ser bastante simples (el estado para `AggregateFunctionCount` es sólo una sola `UInt64` valor) o bastante complejo (el estado de `AggregateFunctionUniqCombined` es una combinación de una matriz lineal, una tabla hash, y un `HyperLogLog` estructura de datos probabilística). - -Los Estados están asignados en `Arena` (un grupo de memoria) para tratar con múltiples estados mientras se ejecuta una alta cardinalidad `GROUP BY` consulta. Los estados pueden tener un constructor y destructor no trivial: por ejemplo, los estados de agregación complicados pueden asignar memoria adicional ellos mismos. Requiere cierta atención a la creación y destrucción de estados y a la adecuada aprobación de su orden de propiedad y destrucción. - -Los estados de agregación se pueden serializar y deserializar para pasar a través de la red durante la ejecución de consultas distribuidas o para escribirlos en el disco donde no hay suficiente RAM. Incluso se pueden almacenar en una tabla con el `DataTypeAggregateFunction` para permitir la agregación incremental de datos. - -> El formato de datos serializados para los estados de función agregados no tiene versiones en este momento. Está bien si los estados agregados solo se almacenan temporalmente. Pero tenemos el `AggregatingMergeTree` motor de tabla para la agregación incremental, y la gente ya lo está utilizando en producción. Es la razón por la que se requiere compatibilidad con versiones anteriores al cambiar el formato serializado para cualquier función agregada en el futuro. - -## Servidor {#server} - -El servidor implementa varias interfaces diferentes: - -- Una interfaz HTTP para cualquier cliente extranjero. -- Una interfaz TCP para el cliente nativo de ClickHouse y para la comunicación entre servidores durante la ejecución de consultas distribuidas. -- Una interfaz para transferir datos para la replicación. - -Internamente, es solo un servidor multiproceso primitivo sin corutinas ni fibras. Dado que el servidor no está diseñado para procesar una alta tasa de consultas simples, sino para procesar una tasa relativamente baja de consultas complejas, cada uno de ellos puede procesar una gran cantidad de datos para análisis. - -El servidor inicializa el `Context` clase con el entorno necesario para la ejecución de consultas: la lista de bases de datos disponibles, usuarios y derechos de acceso, configuración, clústeres, la lista de procesos, el registro de consultas, etc. Los intérpretes utilizan este entorno. 
- -Mantenemos una compatibilidad total con versiones anteriores y posteriores para el protocolo TCP del servidor: los clientes antiguos pueden hablar con servidores nuevos y los nuevos clientes pueden hablar con servidores antiguos. Pero no queremos mantenerlo eternamente, y estamos eliminando el soporte para versiones antiguas después de aproximadamente un año. - -!!! note "Nota" - Para la mayoría de las aplicaciones externas, recomendamos usar la interfaz HTTP porque es simple y fácil de usar. El protocolo TCP está más estrechamente vinculado a las estructuras de datos internas: utiliza un formato interno para pasar bloques de datos y utiliza marcos personalizados para datos comprimidos. No hemos lanzado una biblioteca C para ese protocolo porque requiere vincular la mayor parte de la base de código ClickHouse, lo cual no es práctico. - -## Ejecución de consultas distribuidas {#distributed-query-execution} - -Los servidores de una configuración de clúster son en su mayoría independientes. Puede crear un `Distributed` en uno o todos los servidores de un clúster. El `Distributed` table does not store data itself – it only provides a “view” a todas las tablas locales en varios nodos de un clúster. Cuando se SELECCIONA desde un `Distributed` tabla, reescribe esa consulta, elige nodos remotos de acuerdo con la configuración de equilibrio de carga y les envía la consulta. El `Distributed` table solicita a los servidores remotos que procesen una consulta hasta una etapa en la que se pueden fusionar resultados intermedios de diferentes servidores. Luego recibe los resultados intermedios y los fusiona. La tabla distribuida intenta distribuir tanto trabajo como sea posible a servidores remotos y no envía muchos datos intermedios a través de la red. - -Las cosas se vuelven más complicadas cuando tiene subconsultas en cláusulas IN o JOIN, y cada una de ellas usa un `Distributed` tabla. Tenemos diferentes estrategias para la ejecución de estas consultas. - -No existe un plan de consulta global para la ejecución de consultas distribuidas. Cada nodo tiene su plan de consulta local para su parte del trabajo. Solo tenemos una ejecución simple de consultas distribuidas de un solo paso: enviamos consultas para nodos remotos y luego fusionamos los resultados. Pero esto no es factible para consultas complicadas con alta cardinalidad GROUP BY o con una gran cantidad de datos temporales para JOIN. En tales casos, necesitamos “reshuffle” datos entre servidores, lo que requiere una coordinación adicional. ClickHouse no admite ese tipo de ejecución de consultas, y tenemos que trabajar en ello. - -## Árbol de fusión {#merge-tree} - -`MergeTree` es una familia de motores de almacenamiento que admite la indexación por clave principal. La clave principal puede ser una tupla arbitraria de columnas o expresiones. Datos en un `MergeTree` se almacena en “parts”. Cada parte almacena los datos en el orden de la clave principal, por lo que la tupla de la clave principal ordena los datos lexicográficamente. Todas las columnas de la tabla se almacenan en `column.bin` archivos en estas partes. Los archivos consisten en bloques comprimidos. Cada bloque suele ser de 64 KB a 1 MB de datos sin comprimir, dependiendo del tamaño del valor promedio. Los bloques constan de valores de columna colocados contiguamente uno tras otro. Los valores de columna están en el mismo orden para cada columna (la clave principal define el orden), por lo que cuando itera por muchas columnas, obtiene valores para las filas correspondientes. 
- -La clave principal en sí es “sparse”. No aborda cada fila, sino solo algunos rangos de datos. Separado `primary.idx` file tiene el valor de la clave principal para cada fila N-ésima, donde se llama N `index_granularity` (generalmente, N = 8192). Además, para cada columna, tenemos `column.mrk` archivos con “marks,” que son desplazamientos a cada fila N-ésima en el archivo de datos. Cada marca es un par: el desplazamiento en el archivo al comienzo del bloque comprimido y el desplazamiento en el bloque descomprimido al comienzo de los datos. Por lo general, los bloques comprimidos están alineados por marcas, y el desplazamiento en el bloque descomprimido es cero. Datos para `primary.idx` siempre reside en la memoria, y los datos para `column.mrk` archivos se almacena en caché. - -Cuando vamos a leer algo de una parte en `MergeTree` miramos `primary.idx` datos y localice rangos que podrían contener datos solicitados, luego mire `column.mrk` datos y calcular compensaciones para dónde comenzar a leer esos rangos. Debido a la escasez, el exceso de datos puede ser leído. ClickHouse no es adecuado para una gran carga de consultas de puntos simples, porque todo el rango con `index_granularity` se deben leer filas para cada clave, y todo el bloque comprimido debe descomprimirse para cada columna. Hicimos que el índice sea disperso porque debemos poder mantener billones de filas por único servidor sin un consumo de memoria notable para el índice. Además, debido a que la clave principal es escasa, no es única: no puede verificar la existencia de la clave en la tabla en el momento de INSERTAR. Podría tener muchas filas con la misma clave en una tabla. - -Cuando `INSERT` un montón de datos en `MergeTree`, ese grupo está ordenado por orden de clave primaria y forma una nueva parte. Hay subprocesos de fondo que seleccionan periódicamente algunas partes y las fusionan en una sola parte ordenada para mantener el número de partes relativamente bajo. Es por eso que se llama `MergeTree`. Por supuesto, la fusión conduce a “write amplification”. Todas las partes son inmutables: solo se crean y eliminan, pero no se modifican. Cuando se ejecuta SELECT, contiene una instantánea de la tabla (un conjunto de partes). Después de la fusión, también mantenemos las piezas viejas durante algún tiempo para facilitar la recuperación después de la falla, por lo que si vemos que alguna parte fusionada probablemente esté rota, podemos reemplazarla con sus partes de origen. - -`MergeTree` no es un árbol de LSM porque no contiene “memtable” y “log”: inserted data is written directly to the filesystem. This makes it suitable only to INSERT data in batches, not by individual row and not very frequently – about once per second is ok, but a thousand times a second is not. We did it this way for simplicity's sake, and because we are already inserting data in batches in our applications. - -> Las tablas MergeTree solo pueden tener un índice (primario): no hay índices secundarios. Sería bueno permitir múltiples representaciones físicas bajo una tabla lógica, por ejemplo, para almacenar datos en más de un orden físico o incluso para permitir representaciones con datos preagregados junto con datos originales. - -Hay motores MergeTree que están haciendo un trabajo adicional durante las fusiones en segundo plano. Los ejemplos son `CollapsingMergeTree` y `AggregatingMergeTree`. Esto podría tratarse como soporte especial para actualizaciones. 
Tenga en cuenta que estas no son actualizaciones reales porque los usuarios generalmente no tienen control sobre el tiempo en que se ejecutan las fusiones en segundo plano y los datos en un `MergeTree` casi siempre se almacena en más de una parte, no en forma completamente fusionada. - -## Replicación {#replication} - -La replicación en ClickHouse se puede configurar por tabla. Podría tener algunas tablas replicadas y otras no replicadas en el mismo servidor. También puede tener tablas replicadas de diferentes maneras, como una tabla con replicación de dos factores y otra con replicación de tres factores. - -La replicación se implementa en el `ReplicatedMergeTree` motor de almacenamiento. El camino en `ZooKeeper` se especifica como un parámetro para el motor de almacenamiento. Todas las tablas con la misma ruta en `ZooKeeper` se convierten en réplicas entre sí: sincronizan sus datos y mantienen la coherencia. Las réplicas se pueden agregar y eliminar dinámicamente simplemente creando o soltando una tabla. - -La replicación utiliza un esquema multi-maestro asíncrono. Puede insertar datos en cualquier réplica que tenga una sesión con `ZooKeeper`, y los datos se replican en todas las demás réplicas de forma asíncrona. Como ClickHouse no admite UPDATE, la replicación está libre de conflictos. Como no hay reconocimiento de quórum de inserciones, los datos recién insertados pueden perderse si un nodo falla. - -Los metadatos para la replicación se almacenan en ZooKeeper. Hay un registro de replicación que enumera las acciones que se deben realizar. Las acciones son: obtener parte; fusionar partes; soltar una partición, etc. Cada réplica copia el registro de replicación en su cola y, a continuación, ejecuta las acciones desde la cola. Por ejemplo, en la inserción, el “get the part” la acción se crea en el registro y cada réplica descarga esa parte. Las fusiones se coordinan entre réplicas para obtener resultados idénticos en bytes. Todas las piezas se combinan de la misma manera en todas las réplicas. Se logra eligiendo una réplica como líder, y esa réplica inicia fusiones y escrituras “merge parts” acciones al registro. - -La replicación es física: solo las partes comprimidas se transfieren entre nodos, no consultas. Las fusiones se procesan en cada réplica de forma independiente en la mayoría de los casos para reducir los costos de red al evitar la amplificación de la red. Las piezas combinadas grandes se envían a través de la red solo en casos de retraso de replicación significativo. - -Además, cada réplica almacena su estado en ZooKeeper como el conjunto de piezas y sus sumas de comprobación. Cuando el estado en el sistema de archivos local difiere del estado de referencia en ZooKeeper, la réplica restaura su coherencia descargando partes faltantes y rotas de otras réplicas. Cuando hay algunos datos inesperados o rotos en el sistema de archivos local, ClickHouse no los elimina, sino que los mueve a un directorio separado y los olvida. - -!!! note "Nota" - El clúster ClickHouse consta de fragmentos independientes y cada fragmento consta de réplicas. El clúster es **no elástico**, por lo tanto, después de agregar un nuevo fragmento, los datos no se reequilibran automáticamente entre fragmentos. En su lugar, se supone que la carga del clúster debe ajustarse para que sea desigual. Esta implementación le da más control, y está bien para clústeres relativamente pequeños, como decenas de nodos. 
Pero para los clústeres con cientos de nodos que estamos utilizando en producción, este enfoque se convierte en un inconveniente significativo. Debemos implementar un motor de tablas que abarque todo el clúster con regiones replicadas dinámicamente que puedan dividirse y equilibrarse entre clústeres automáticamente. - -{## [Artículo Original](https://clickhouse.tech/docs/en/development/architecture/) ##} diff --git a/docs/es/development/browse-code.md b/docs/es/development/browse-code.md deleted file mode 100644 index ca031ad03f3..00000000000 --- a/docs/es/development/browse-code.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 63 -toc_title: "Buscar c\xF3digo fuente" ---- - -# Examinar el código fuente de ClickHouse {#browse-clickhouse-source-code} - -Usted puede utilizar **Woboq** navegador de código en línea disponible [aqui](https://clickhouse.tech/codebrowser/html_report/ClickHouse/src/index.html). Proporciona navegación de código y resaltado semántico, búsqueda e indexación. La instantánea de código se actualiza diariamente. - -Además, puede navegar por las fuentes en [GitHub](https://github.com/ClickHouse/ClickHouse) como de costumbre. - -Si está interesado en qué IDE usar, recomendamos CLion, QT Creator, VS Code y KDevelop (con advertencias). Puedes usar cualquier IDE favorito. Vim y Emacs también cuentan. diff --git a/docs/es/development/build-cross-arm.md b/docs/es/development/build-cross-arm.md deleted file mode 100644 index 2758e9a0e94..00000000000 --- a/docs/es/development/build-cross-arm.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 67 -toc_title: "C\xF3mo construir ClickHouse en Linux para AARCH64 (ARM64)" ---- - -# Cómo construir ClickHouse en Linux para la arquitectura AARCH64 (ARM64) {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture} - -Esto es para el caso cuando tiene una máquina Linux y desea usarla para compilar `clickhouse` binario que se ejecutará en otra máquina Linux con arquitectura de CPU AARCH64. Esto está destinado a las comprobaciones de integración continua que se ejecutan en servidores Linux. - -La compilación cruzada para AARCH64 se basa en el [Instrucciones de construcción](build.md), seguirlos primero. - -# Instalar Clang-8 {#install-clang-8} - -Siga las instrucciones de https://apt.llvm.org/ para la configuración de Ubuntu o Debian. -Por ejemplo, en Ubuntu Bionic puede usar los siguientes comandos: - -``` bash -echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" | sudo tee /etc/apt/sources.list.d/llvm.list -sudo apt-get update -sudo apt-get install clang-8 -``` - -# Instalar conjunto de herramientas de compilación cruzada {#install-cross-compilation-toolset} - -``` bash -cd ClickHouse -mkdir -p build-aarch64/cmake/toolchain/linux-aarch64 -wget 'https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz?revision=2e88a73f-d233-4f96-b1f4-d8b36e9bb0b9&la=en' -O gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build-aarch64/cmake/toolchain/linux-aarch64 --strip-components=1 -``` - -# Construir ClickHouse {#build-clickhouse} - -``` bash -cd ClickHouse -mkdir build-arm64 -CC=clang-8 CXX=clang++-8 cmake . 
-Bbuild-arm64 -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-aarch64.cmake -ninja -C build-arm64 -``` - -El binario resultante se ejecutará solo en Linux con la arquitectura de CPU AARCH64. diff --git a/docs/es/development/build-cross-osx.md b/docs/es/development/build-cross-osx.md deleted file mode 100644 index d00e57c5d31..00000000000 --- a/docs/es/development/build-cross-osx.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 66 -toc_title: "C\xF3mo construir ClickHouse en Linux para Mac OS X" ---- - -# Cómo construir ClickHouse en Linux para Mac OS X {#how-to-build-clickhouse-on-linux-for-mac-os-x} - -Esto es para el caso cuando tiene una máquina Linux y desea usarla para compilar `clickhouse` Esto está destinado a las comprobaciones de integración continuas que se ejecutan en servidores Linux. Si desea crear ClickHouse directamente en Mac OS X, continúe con [otra instrucción](build-osx.md). - -La compilación cruzada para Mac OS X se basa en el [Instrucciones de construcción](build.md), seguirlos primero. - -# Instalar Clang-8 {#install-clang-8} - -Siga las instrucciones de https://apt.llvm.org/ para la configuración de Ubuntu o Debian. -Por ejemplo, los comandos para Bionic son como: - -``` bash -sudo echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" >> /etc/apt/sources.list -sudo apt-get install clang-8 -``` - -# Instalar conjunto de herramientas de compilación cruzada {#install-cross-compilation-toolset} - -Recordemos la ruta donde instalamos `cctools` como ${CCTOOLS} - -``` bash -mkdir ${CCTOOLS} - -git clone https://github.com/tpoechtrager/apple-libtapi.git -cd apple-libtapi -INSTALLPREFIX=${CCTOOLS} ./build.sh -./install.sh -cd .. - -git clone https://github.com/tpoechtrager/cctools-port.git -cd cctools-port/cctools -./configure --prefix=${CCTOOLS} --with-libtapi=${CCTOOLS} --target=x86_64-apple-darwin -make install -``` - -Además, necesitamos descargar macOS X SDK en el árbol de trabajo. - -``` bash -cd ClickHouse -wget 'https://github.com/phracker/MacOSX-SDKs/releases/download/10.15/MacOSX10.15.sdk.tar.xz' -mkdir -p build-darwin/cmake/toolchain/darwin-x86_64 -tar xJf MacOSX10.15.sdk.tar.xz -C build-darwin/cmake/toolchain/darwin-x86_64 --strip-components=1 -``` - -# Construir ClickHouse {#build-clickhouse} - -``` bash -cd ClickHouse -mkdir build-osx -CC=clang-8 CXX=clang++-8 cmake . -Bbuild-osx -DCMAKE_TOOLCHAIN_FILE=cmake/darwin/toolchain-x86_64.cmake \ - -DCMAKE_AR:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ar \ - -DCMAKE_RANLIB:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ranlib \ - -DLINKER_NAME=${CCTOOLS}/bin/x86_64-apple-darwin-ld -ninja -C build-osx -``` - -El binario resultante tendrá un formato ejecutable Mach-O y no se puede ejecutar en Linux. 
diff --git a/docs/es/development/build-osx.md b/docs/es/development/build-osx.md
deleted file mode 100644
index 39eba389798..00000000000
--- a/docs/es/development/build-osx.md
+++ /dev/null
@@ -1,93 +0,0 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 65
toc_title: "C\xF3mo crear ClickHouse en Mac OS X"
---

# Cómo crear ClickHouse en Mac OS X {#how-to-build-clickhouse-on-mac-os-x}

La compilación debería funcionar en Mac OS X 10.15 (Catalina).

## Instalar Homebrew {#install-homebrew}

``` bash
$ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
```

## Instalar compiladores, herramientas y bibliotecas necesarios {#install-required-compilers-tools-and-libraries}

``` bash
$ brew install cmake ninja libtool gettext
```

## Obtener las fuentes de ClickHouse {#checkout-clickhouse-sources}

``` bash
$ git clone --recursive git@github.com:ClickHouse/ClickHouse.git
```

o

``` bash
$ git clone --recursive https://github.com/ClickHouse/ClickHouse.git

$ cd ClickHouse
```

## Construir ClickHouse {#build-clickhouse}

``` bash
$ mkdir build
$ cd build
$ cmake .. -DCMAKE_CXX_COMPILER=`which clang++` -DCMAKE_C_COMPILER=`which clang`
$ ninja
$ cd ..
```

## Salvedades {#caveats}

Si tiene la intención de ejecutar clickhouse-server, asegúrese de aumentar la variable `maxfiles` del sistema.

!!! info "Nota"
    Tendrá que usar sudo.

Para ello, cree el siguiente archivo:

`/Library/LaunchDaemons/limit.maxfiles.plist`:

``` xml
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
        "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
  <dict>
    <key>Label</key>
    <string>limit.maxfiles</string>
    <key>ProgramArguments</key>
    <array>
      <string>launchctl</string>
      <string>limit</string>
      <string>maxfiles</string>
      <string>524288</string>
      <string>524288</string>
    </array>
    <key>RunAtLoad</key>
    <true/>
    <key>ServiceIPC</key>
    <false/>
  </dict>
</plist>
```

Ejecute el siguiente comando:

``` bash
$ sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist
```

Reinicie.

Para verificar si está funcionando, puede usar el comando `ulimit -n`.

[Artículo Original](https://clickhouse.tech/docs/en/development/build_osx/)

diff --git a/docs/es/development/build.md b/docs/es/development/build.md
deleted file mode 100644
index 42cd9b5433f..00000000000
--- a/docs/es/development/build.md
+++ /dev/null
@@ -1,141 +0,0 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 64
toc_title: "C\xF3mo crear ClickHouse en Linux"
---

# Cómo construir ClickHouse para el desarrollo {#how-to-build-clickhouse-for-development}

El siguiente tutorial se basa en el sistema Ubuntu Linux.
Con los cambios apropiados, también debería funcionar en cualquier otra distribución de Linux.
Plataformas compatibles: x86_64 y AArch64. El soporte para Power9 es experimental.

## Instalar Git, CMake, Python y Ninja {#install-git-cmake-python-and-ninja}

``` bash
$ sudo apt-get install git cmake python ninja-build
```

O use cmake3 en lugar de cmake en sistemas más antiguos.

## Instalar GCC 10 {#install-gcc-10}

Hay varias formas de hacerlo.
### Instalar desde un paquete PPA {#install-from-a-ppa-package}

``` bash
$ sudo apt-get install software-properties-common
$ sudo apt-add-repository ppa:ubuntu-toolchain-r/test
$ sudo apt-get update
$ sudo apt-get install gcc-10 g++-10
```

### Instalar desde fuentes {#install-from-sources}

Consulte [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh)

## Usar GCC 10 para compilaciones {#use-gcc-10-for-builds}

``` bash
$ export CC=gcc-10
$ export CXX=g++-10
```

## Obtener las fuentes de ClickHouse {#checkout-clickhouse-sources}

``` bash
$ git clone --recursive git@github.com:ClickHouse/ClickHouse.git
```

o

``` bash
$ git clone --recursive https://github.com/ClickHouse/ClickHouse.git
```

## Construir ClickHouse {#build-clickhouse}

``` bash
$ cd ClickHouse
$ mkdir build
$ cd build
$ cmake ..
$ ninja
$ cd ..
```

Para crear un ejecutable, ejecute `ninja clickhouse`.
Esto creará el ejecutable `programs/clickhouse`, que se puede usar con el argumento `client` o `server`.

# Cómo construir ClickHouse en cualquier Linux {#how-to-build-clickhouse-on-any-linux}

La compilación requiere los siguientes componentes:

- Git (se usa solo para obtener las fuentes; no es necesario para la compilación)
- CMake 3.10 o más reciente
- Ninja (recomendado) o Make
- Compilador de C++: gcc 10, o clang 8 o más reciente
- Enlazador: lld o gold (el clásico GNU ld no funcionará)
- Python (solo se usa dentro de la compilación de LLVM y es opcional)

Si todos los componentes están instalados, puede compilar de la misma manera que en los pasos anteriores.

Ejemplo para Ubuntu Eoan:

    sudo apt update
    sudo apt install git cmake ninja-build g++ python
    git clone --recursive https://github.com/ClickHouse/ClickHouse.git
    mkdir build && cd build
    cmake ../ClickHouse
    ninja

Ejemplo para OpenSUSE Tumbleweed:

    sudo zypper install git cmake ninja gcc-c++ python lld
    git clone --recursive https://github.com/ClickHouse/ClickHouse.git
    mkdir build && cd build
    cmake ../ClickHouse
    ninja

Ejemplo para Fedora Rawhide:

    sudo yum update
    yum --nogpg install git cmake make gcc-c++ python3
    git clone --recursive https://github.com/ClickHouse/ClickHouse.git
    mkdir build && cd build
    cmake ../ClickHouse
    make -j $(nproc)

# No tiene que construir ClickHouse {#you-dont-have-to-build-clickhouse}

ClickHouse está disponible en binarios y paquetes preconstruidos. Los binarios son portátiles y se pueden ejecutar en cualquier tipo de Linux.

Se compilan para los lanzamientos estables, de preestreno (prestable) y de prueba, así como para cada confirmación en `master` y para cada solicitud de extracción (pull request).

Para encontrar la compilación más reciente de `master`, vaya a la [página de confirmaciones](https://github.com/ClickHouse/ClickHouse/commits/master), haga clic en la primera marca de verificación verde o cruz roja cerca de la confirmación y haga clic en el enlace “Details” justo después de “ClickHouse Build Check”.
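Un esbozo orientativo de cómo probar uno de esos binarios portátiles una vez descargado (el nombre de archivo `clickhouse` es una suposición; depende del artefacto que descargue):

``` bash
chmod +x clickhouse
# El binario multillamada acepta 'server' o 'client' como primer argumento
./clickhouse server --version
```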
# Cómo construir el paquete Debian de ClickHouse {#how-to-build-clickhouse-debian-package}

## Instalar Git y Pbuilder {#install-git-and-pbuilder}

``` bash
$ sudo apt-get update
$ sudo apt-get install git python pbuilder debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring
```

## Obtener las fuentes de ClickHouse {#checkout-clickhouse-sources-1}

``` bash
$ git clone --recursive --branch master https://github.com/ClickHouse/ClickHouse.git
$ cd ClickHouse
```

## Ejecutar el script de lanzamiento {#run-release-script}

``` bash
$ ./release
```

[Artículo Original](https://clickhouse.tech/docs/en/development/build/)

diff --git a/docs/es/development/contrib.md b/docs/es/development/contrib.md
deleted file mode 100644
index 3f3013570e5..00000000000
--- a/docs/es/development/contrib.md
+++ /dev/null
@@ -1,41 +0,0 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 70
toc_title: Bibliotecas de terceros utilizadas
---

# Bibliotecas de terceros utilizadas {#third-party-libraries-used}

| Biblioteca | Licencia |
|---------------------|----------|
| base64 | [Licencia BSD de 2 cláusulas](https://github.com/aklomp/base64/blob/a27c565d1b6c676beaf297fe503c4518185666f7/LICENSE) |
| boost | [Licencia de software Boost 1.0](https://github.com/ClickHouse-Extras/boost-extra/blob/6883b40449f378019aec792f9983ce3afc7ff16e/LICENSE_1_0.txt) |
| brotli | [MIT](https://github.com/google/brotli/blob/master/LICENSE) |
| capnproto | [MIT](https://github.com/capnproto/capnproto/blob/master/LICENSE) |
| cctz | [Licencia Apache 2.0](https://github.com/google/cctz/blob/4f9776a310f4952454636363def82c2bf6641d5f/LICENSE.txt) |
| double-conversion | [Licencia BSD de 3 cláusulas](https://github.com/google/double-conversion/blob/cf2f0f3d547dc73b4612028a155b80536902ba02/LICENSE) |
| FastMemcpy | [MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libmemcpy/impl/LICENSE) |
| googletest | [Licencia BSD de 3 cláusulas](https://github.com/google/googletest/blob/master/LICENSE) |
| h3 | [Licencia Apache 2.0](https://github.com/uber/h3/blob/master/LICENSE) |
| hyperscan | [Licencia BSD de 3 cláusulas](https://github.com/intel/hyperscan/blob/master/LICENSE) |
| libcxxabi | [BSD + MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libglibc-compatibility/libcxxabi/LICENSE.TXT) |
| libdivide | [Licencia Zlib](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libdivide/LICENSE.txt) |
| libgsasl | [LGPL v2.1](https://github.com/ClickHouse-Extras/libgsasl/blob/3b8948a4042e34fb00b4fb987535dc9e02e39040/LICENSE) |
| libhdfs3 | [Licencia Apache 2.0](https://github.com/ClickHouse-Extras/libhdfs3/blob/bd6505cbb0c130b0db695305b9a38546fa880e5a/LICENSE.txt) |
| libmetrohash | [Licencia Apache 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libmetrohash/LICENSE) |
| libpcg-random | [Licencia Apache 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libpcg-random/LICENSE-APACHE.txt) |
| libressl | [Licencia OpenSSL](https://github.com/ClickHouse-Extras/ssl/blob/master/COPYING) |
| librdkafka | [Licencia BSD de 2 cláusulas](https://github.com/edenhill/librdkafka/blob/363dcad5a23dc29381cc626620e68ae418b3af19/LICENSE) |
| libwidechar_width | [CC0 1.0 Universal](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libwidechar_width/LICENSE) |
| llvm | [Licencia BSD de 3 cláusulas](https://github.com/ClickHouse-Extras/llvm/blob/163def217817c90fb982a6daf384744d8472b92b/llvm/LICENSE.TXT) |
| lz4 | [Licencia BSD de 2 cláusulas](https://github.com/lz4/lz4/blob/c10863b98e1503af90616ae99725ecd120265dfb/LICENSE) |
| mariadb-connector-c | [LGPL v2.1](https://github.com/ClickHouse-Extras/mariadb-connector-c/blob/3.1/COPYING.LIB) |
| murmurhash | [Dominio público](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/murmurhash/LICENSE) |
| pdqsort | [Licencia Zlib](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/pdqsort/license.txt) |
| Poco | [Boost Software License - Versión 1.0](https://github.com/ClickHouse-Extras/poco/blob/fe5505e56c27b6ecb0dcbc40c49dc2caf4e9637f/LICENSE) |
| protobuf | [Licencia BSD de 3 cláusulas](https://github.com/ClickHouse-Extras/protobuf/blob/12735370922a35f03999afff478e1c6d7aa917a4/LICENSE) |
| re2 | [Licencia BSD de 3 cláusulas](https://github.com/google/re2/blob/7cf8b88e8f70f97fd4926b56aa87e7f53b2717e0/LICENSE) |
| UnixODBC | [LGPL v2.1](https://github.com/ClickHouse-Extras/UnixODBC/tree/b0ad30f7f6289c12b76f04bfb9d466374bb32168) |
| zlib-ng | [Licencia Zlib](https://github.com/ClickHouse-Extras/zlib-ng/blob/develop/LICENSE.md) |
| zstd | [Licencia BSD de 3 cláusulas](https://github.com/facebook/zstd/blob/dev/LICENSE) |

diff --git a/docs/es/development/developer-instruction.md b/docs/es/development/developer-instruction.md
deleted file mode 100644
index 0ce5d0b457a..00000000000
--- a/docs/es/development/developer-instruction.md
+++ /dev/null
@@ -1,287 +0,0 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 61
toc_title: "La instrucci\xF3n para desarrolladores de ClickHouse para principiantes"
---

La construcción de ClickHouse es compatible con Linux, FreeBSD y Mac OS X.

# Si utiliza Windows {#if-you-use-windows}

Si usa Windows, necesita crear una máquina virtual con Ubuntu. Para comenzar a trabajar con una máquina virtual, instale VirtualBox. Puede descargar Ubuntu desde el sitio web: https://www.ubuntu.com/#download. Cree una máquina virtual a partir de la imagen descargada (debe reservar al menos 4 GB de RAM para ella). Para ejecutar un terminal de línea de comandos en Ubuntu, busque un programa que contenga la palabra “terminal” en su nombre (gnome-terminal, konsole, etc.) o simplemente presione Ctrl+Alt+T.

# Si utiliza un sistema de 32 bits {#if-you-use-a-32-bit-system}

ClickHouse no puede funcionar ni compilarse en un sistema de 32 bits. Debe conseguir acceso a un sistema de 64 bits antes de continuar.

# Creación de un repositorio en GitHub {#creating-a-repository-on-github}

Para comenzar a trabajar con el repositorio de ClickHouse, necesitará una cuenta de GitHub.

Probablemente ya tenga una; si no, regístrese en https://github.com. En caso de que no tenga claves SSH, debe generarlas y luego cargarlas en GitHub. Son necesarias para enviar sus parches. También es posible usar las mismas claves SSH que usa con cualquier otro servidor SSH; probablemente ya las tenga.

Cree una bifurcación del repositorio de ClickHouse. Para hacerlo, haga clic en el botón “fork” en la esquina superior derecha en https://github.com/ClickHouse/ClickHouse. Esto creará su propia copia de ClickHouse/ClickHouse en su cuenta.
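Como alternativa a la interfaz web, la herramienta opcional GitHub CLI (`gh`, una suposición: no forma parte de estas instrucciones ni viene instalada por defecto) puede crear la bifurcación y clonarla en un solo paso:

``` bash
# Requiere tener gh instalado y autenticado (gh auth login)
gh repo fork ClickHouse/ClickHouse --clone
```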
- -El proceso de desarrollo consiste en comprometer primero los cambios previstos en su bifurcación de ClickHouse y luego crear un “pull request” para que estos cambios sean aceptados en el repositorio principal (ClickHouse / ClickHouse). - -Para trabajar con repositorios git, instale `git`. - -Para hacer eso en Ubuntu, ejecutaría en la terminal de línea de comandos: - - sudo apt update - sudo apt install git - -Puede encontrar un breve manual sobre el uso de Git aquí: https://education.github.com/git-cheat-sheet-education.pdf . -Para obtener un manual detallado sobre Git, consulte https://git-scm.com/book/en/v2 . - -# Clonación de un repositorio en su máquina de desarrollo {#cloning-a-repository-to-your-development-machine} - -A continuación, debe descargar los archivos fuente en su máquina de trabajo. Esto se llama “to clone a repository” porque crea una copia local del repositorio en su máquina de trabajo. - -En el terminal de línea de comandos, ejecute: - - git clone --recursive git@github.com:your_github_username/ClickHouse.git - cd ClickHouse - -Nota: por favor, sustituye *your_github_username* con lo que es apropiado! - -Este comando creará un directorio `ClickHouse` que contiene la copia de trabajo del proyecto. - -Es importante que la ruta al directorio de trabajo no contenga espacios en blanco, ya que puede ocasionar problemas con la ejecución del sistema de compilación. - -Tenga en cuenta que el repositorio ClickHouse utiliza `submodules`. That is what the references to additional repositories are called (i.e. external libraries on which the project depends). It means that when cloning the repository you need to specify the `--recursive` como en el ejemplo anterior. Si el repositorio se ha clonado sin submódulos, para descargarlos debe ejecutar lo siguiente: - - git submodule init - git submodule update - -Puede verificar el estado con el comando: `git submodule status`. - -Si recibe el siguiente mensaje de error: - - Permission denied (publickey). - fatal: Could not read from remote repository. - - Please make sure you have the correct access rights - and the repository exists. - -Por lo general, significa que faltan las claves SSH para conectarse a GitHub. Estas teclas se encuentran normalmente en `~/.ssh`. Para que las claves SSH sean aceptadas, debe cargarlas en la sección de configuración de la interfaz de usuario de GitHub. - -También puede clonar el repositorio a través del protocolo https: - - git clone https://github.com/ClickHouse/ClickHouse.git - -Sin embargo, esto no le permitirá enviar los cambios al servidor. Aún puede usarlo temporalmente y agregar las claves SSH más tarde reemplazando la dirección remota del repositorio con `git remote` comando. - -También puede agregar la dirección original del repositorio de ClickHouse a su repositorio local para extraer actualizaciones desde allí: - - git remote add upstream git@github.com:ClickHouse/ClickHouse.git - -Después de ejecutar con éxito este comando, podrá extraer actualizaciones del repositorio principal de ClickHouse ejecutando `git pull upstream master`. - -## Trabajar con submódulos {#working-with-submodules} - -Trabajar con submódulos en git podría ser doloroso. Los siguientes comandos ayudarán a administrarlo: - - # ! each command accepts --recursive - # Update remote URLs for submodules. 
Barely rare case - git submodule sync - # Add new submodules - git submodule init - # Update existing submodules to the current state - git submodule update - # Two last commands could be merged together - git submodule update --init - -Los siguientes comandos le ayudarían a restablecer todos los submódulos al estado inicial (!¡ADVERTENCIA! - cualquier cambio en el interior será eliminado): - - # Synchronizes submodules' remote URL with .gitmodules - git submodule sync --recursive - # Update the registered submodules with initialize not yet initialized - git submodule update --init --recursive - # Reset all changes done after HEAD - git submodule foreach git reset --hard - # Clean files from .gitignore - git submodule foreach git clean -xfd - # Repeat last 4 commands for all submodule - git submodule foreach git submodule sync --recursive - git submodule foreach git submodule update --init --recursive - git submodule foreach git submodule foreach git reset --hard - git submodule foreach git submodule foreach git clean -xfd - -# Sistema de construcción {#build-system} - -ClickHouse utiliza CMake y Ninja para la construcción. - -CMake - un sistema de meta-construcción que puede generar archivos Ninja (tareas de construcción). -Ninja: un sistema de compilación más pequeño con un enfoque en la velocidad utilizada para ejecutar esas tareas generadas por cmake. - -Para instalar en Ubuntu, Debian o Mint run `sudo apt install cmake ninja-build`. - -En CentOS, RedHat se ejecuta `sudo yum install cmake ninja-build`. - -Si usa Arch o Gentoo, probablemente lo sepa usted mismo cómo instalar CMake. - -Para instalar CMake y Ninja en Mac OS X, primero instale Homebrew y luego instale todo lo demás a través de brew: - - /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" - brew install cmake ninja - -A continuación, verifique la versión de CMake: `cmake --version`. Si está por debajo de 3.3, debe instalar una versión más reciente desde el sitio web: https://cmake.org/download/. - -# Bibliotecas externas opcionales {#optional-external-libraries} - -ClickHouse utiliza varias bibliotecas externas para la construcción. Todos ellos no necesitan ser instalados por separado, ya que se construyen junto con ClickHouse a partir de las fuentes ubicadas en los submódulos. Puede consultar la lista en `contrib`. - -# Compilador de C ++ {#c-compiler} - -Los compiladores GCC a partir de la versión 10 y Clang versión 8 o superior son compatibles para construir ClickHouse. - -Las compilaciones oficiales de Yandex actualmente usan GCC porque genera código de máquina de un rendimiento ligeramente mejor (con una diferencia de hasta varios por ciento según nuestros puntos de referencia). Y Clang es más conveniente para el desarrollo generalmente. Sin embargo, nuestra plataforma de integración continua (CI) ejecuta verificaciones de aproximadamente una docena de combinaciones de compilación. - -Para instalar GCC en Ubuntu, ejecute: `sudo apt install gcc g++` - -Compruebe la versión de gcc: `gcc --version`. Si está por debajo de 9, siga las instrucciones aquí: https://clickhouse.tech/docs/es/development/build/#install-gcc-10. - -La compilación de Mac OS X solo es compatible con Clang. Sólo tiene que ejecutar `brew install llvm` - -Si decide utilizar Clang, también puede instalar `libc++` y `lld` si usted sabe lo que es. Utilizar `ccache` también se recomienda. 
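Una forma habitual de activar `ccache` es pedirle a CMake que lance los compiladores a través de él; un esbozo orientativo (los parámetros `CMAKE_*_COMPILER_LAUNCHER` existen en CMake desde la versión 3.4):

``` bash
sudo apt install ccache
# Ejecutar dentro del directorio build; las recompilaciones repetidas se acelerarán
cmake -DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache ..
```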
- -# El proceso de construcción {#the-building-process} - -Ahora que está listo para construir ClickHouse, le recomendamos que cree un directorio separado `build` dentro `ClickHouse` que contendrá todos los de la generación de artefactos: - - mkdir build - cd build - -Puede tener varios directorios diferentes (build_release, build_debug, etc.) para diferentes tipos de construcción. - -Mientras que dentro de la `build` directorio, configure su compilación ejecutando CMake. Antes de la primera ejecución, debe definir variables de entorno que especifiquen el compilador (compilador gcc versión 10 en este ejemplo). - -Linux: - - export CC=gcc-10 CXX=g++-10 - cmake .. - -Mac OS X: - - export CC=clang CXX=clang++ - cmake .. - -El `CC` variable especifica el compilador para C (abreviatura de C Compiler), y `CXX` variable indica qué compilador de C ++ se usará para compilar. - -Para una construcción más rápida, puede recurrir al `debug` tipo de compilación: una compilación sin optimizaciones. Para ese suministro el siguiente parámetro `-D CMAKE_BUILD_TYPE=Debug`: - - cmake -D CMAKE_BUILD_TYPE=Debug .. - -Puede cambiar el tipo de compilación ejecutando este comando en el `build` directorio. - -Ejecutar ninja para construir: - - ninja clickhouse-server clickhouse-client - -Solo los binarios requeridos se van a construir en este ejemplo. - -Si necesita construir todos los binarios (utilidades y pruebas), debe ejecutar ninja sin parámetros: - - ninja - -La compilación completa requiere aproximadamente 30 GB de espacio libre en disco o 15 GB para construir los binarios principales. - -Cuando hay una gran cantidad de RAM disponible en la máquina de compilación, debe limitar el número de tareas de compilación que se ejecutan en paralelo con `-j` parámetro: - - ninja -j 1 clickhouse-server clickhouse-client - -En máquinas con 4GB de RAM, se recomienda especificar 1, para 8GB de RAM `-j 2` se recomienda. - -Si recibe el mensaje: `ninja: error: loading 'build.ninja': No such file or directory`, significa que la generación de una configuración de compilación ha fallado y necesita inspeccionar el mensaje anterior. - -Cuando se inicie correctamente el proceso de construcción, verá el progreso de la compilación: el número de tareas procesadas y el número total de tareas. - -Al crear mensajes sobre archivos protobuf en la biblioteca libhdfs2, como `libprotobuf WARNING` puede aparecer. Afectan a nada y son seguros para ser ignorado. - -Tras la compilación exitosa, obtienes un archivo ejecutable `ClickHouse//programs/clickhouse`: - - ls -l programs/clickhouse - -# Ejecución del ejecutable construido de ClickHouse {#running-the-built-executable-of-clickhouse} - -Para ejecutar el servidor bajo el usuario actual, debe navegar hasta `ClickHouse/programs/server/` (situado fuera de `build`) y ejecutar: - - ../../build/programs/clickhouse server - -En este caso, ClickHouse usará archivos de configuración ubicados en el directorio actual. Puede ejecutar `clickhouse server` desde cualquier directorio que especifique la ruta a un archivo de configuración como un parámetro de línea de comandos `--config-file`. - -Para conectarse a ClickHouse con clickhouse-client en otro terminal, vaya a `ClickHouse/build/programs/` y ejecutar `./clickhouse client`. 
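En resumen, una sesión típica con dos terminales podría verse así (un esbozo basado en las rutas descritas arriba):

``` bash
# Terminal 1: servidor, usando los archivos de configuración del repositorio
cd ClickHouse/programs/server
../../build/programs/clickhouse server

# Terminal 2: cliente recién compilado
cd ClickHouse/build/programs
./clickhouse client
```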
- -Si usted consigue `Connection refused` mensaje en Mac OS X o FreeBSD, intente especificar la dirección de host 127.0.0.1: - - clickhouse client --host 127.0.0.1 - -Puede reemplazar la versión de producción del binario ClickHouse instalado en su sistema con su binario ClickHouse personalizado. Para ello, instale ClickHouse en su máquina siguiendo las instrucciones del sitio web oficial. A continuación, ejecute lo siguiente: - - sudo service clickhouse-server stop - sudo cp ClickHouse/build/programs/clickhouse /usr/bin/ - sudo service clickhouse-server start - -Tenga en cuenta que `clickhouse-client`, `clickhouse-server` y otros son enlaces simbólicos a los comúnmente compartidos `clickhouse` binario. - -También puede ejecutar su binario ClickHouse personalizado con el archivo de configuración del paquete ClickHouse instalado en su sistema: - - sudo service clickhouse-server stop - sudo -u clickhouse ClickHouse/build/programs/clickhouse server --config-file /etc/clickhouse-server/config.xml - -# IDE (entorno de desarrollo integrado) {#ide-integrated-development-environment} - -Si no sabe qué IDE usar, le recomendamos que use CLion. CLion es un software comercial, pero ofrece un período de prueba gratuito de 30 días. También es gratuito para los estudiantes. CLion se puede usar tanto en Linux como en Mac OS X. - -KDevelop y QTCreator son otras excelentes alternativas de un IDE para desarrollar ClickHouse. KDevelop viene como un IDE muy útil aunque inestable. Si KDevelop se bloquea después de un tiempo al abrir el proyecto, debe hacer clic “Stop All” botón tan pronto como se ha abierto la lista de archivos del proyecto. Después de hacerlo, KDevelop debería estar bien para trabajar. - -Como editores de código simples, puede usar Sublime Text o Visual Studio Code, o Kate (todos los cuales están disponibles en Linux). - -Por si acaso, vale la pena mencionar que CLion crea `build` por sí mismo, también por sí mismo selecciona `debug` para el tipo de compilación, para la configuración usa una versión de CMake que está definida en CLion y no la instalada por usted, y finalmente, CLion usará `make` para ejecutar tareas de compilación en lugar de `ninja`. Este es un comportamiento normal, solo tenlo en cuenta para evitar confusiones. - -# Código de escritura {#writing-code} - -La descripción de la arquitectura ClickHouse se puede encontrar aquí: https://clickhouse.tech/docs/es/desarrollo/arquitectura/ - -La Guía de estilo de código: https://clickhouse.tech/docs/en/development/style/ - -Pruebas de escritura: https://clickhouse.tech/docs/en/development/tests/ - -Lista de tareas: https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3A%22easy+task%22 - -# Datos de prueba {#test-data} - -El desarrollo de ClickHouse a menudo requiere cargar conjuntos de datos realistas. Es particularmente importante para las pruebas de rendimiento. Tenemos un conjunto especialmente preparado de datos anónimos de Yandex.Métrica. Se requiere, además, unos 3 GB de espacio libre en disco. Tenga en cuenta que estos datos no son necesarios para realizar la mayoría de las tareas de desarrollo. 
- - sudo apt install wget xz-utils - - wget https://datasets.clickhouse.tech/hits/tsv/hits_v1.tsv.xz - wget https://datasets.clickhouse.tech/visits/tsv/visits_v1.tsv.xz - - xz -v -d hits_v1.tsv.xz - xz -v -d visits_v1.tsv.xz - - clickhouse-client - - CREATE DATABASE IF NOT EXISTS test - - CREATE TABLE test.hits ( WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, `ParsedParams.Key1` Array(String), `ParsedParams.Key2` Array(String), `ParsedParams.Key3` Array(String), `ParsedParams.Key4` Array(String), `ParsedParams.Key5` Array(String), `ParsedParams.ValueDouble` Array(Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID), EventTime); - - CREATE TABLE test.visits ( CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8, VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32, Hits Int32, IsBounce 
UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String, EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32, SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32, SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16, UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16, FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8, Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), Params Array(String), `Goals.ID` Array(UInt32), `Goals.Serial` Array(UInt32), `Goals.EventTime` Array(DateTime), `Goals.Price` Array(Int64), `Goals.OrderID` Array(String), `Goals.CurrencyID` Array(UInt32), WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64, ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32, ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32, ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32, ClickMarketCategoryName String, ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16, ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime, PredLastVisit Date, LastVisit Date, TotalVisits UInt32, `TraficSource.ID` Array(Int8), `TraficSource.SearchEngineID` Array(UInt16), `TraficSource.AdvEngineID` Array(UInt8), `TraficSource.PlaceID` Array(UInt16), `TraficSource.SocialSourceNetworkID` Array(UInt8), `TraficSource.Domain` Array(String), `TraficSource.SearchPhrase` Array(String), `TraficSource.SocialSourcePage` Array(String), Attendance FixedString(16), CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64, StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64, OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64, UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32, `ParsedParams.Key1` 
Array(String), `ParsedParams.Key2` Array(String), `ParsedParams.Key3` Array(String), `ParsedParams.Key4` Array(String), `ParsedParams.Key5` Array(String), `ParsedParams.ValueDouble` Array(Float64), `Market.Type` Array(UInt8), `Market.GoalID` Array(UInt32), `Market.OrderID` Array(String), `Market.OrderPrice` Array(Int64), `Market.PP` Array(UInt32), `Market.DirectPlaceID` Array(UInt32), `Market.DirectOrderID` Array(UInt32), `Market.DirectBannerID` Array(UInt32), `Market.GoodID` Array(String), `Market.GoodName` Array(String), `Market.GoodQuantity` Array(Int32), `Market.GoodPrice` Array(Int64), IslandID FixedString(16)) ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) SAMPLE BY intHash32(UserID) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID); - - clickhouse-client --max_insert_block_size 100000 --query "INSERT INTO test.hits FORMAT TSV" < hits_v1.tsv - clickhouse-client --max_insert_block_size 100000 --query "INSERT INTO test.visits FORMAT TSV" < visits_v1.tsv - -# Creación de solicitud de extracción {#creating-pull-request} - -Navega a tu repositorio de fork en la interfaz de usuario de GitHub. Si ha estado desarrollando en una sucursal, debe seleccionar esa sucursal. Habrá un “Pull request” botón situado en la pantalla. En esencia, esto significa “create a request for accepting my changes into the main repository”. - -Se puede crear una solicitud de extracción incluso si el trabajo aún no se ha completado. En este caso, por favor ponga la palabra “WIP” (trabajo en curso) al comienzo del título, se puede cambiar más tarde. Esto es útil para la revisión cooperativa y la discusión de los cambios, así como para ejecutar todas las pruebas disponibles. Es importante que proporcione una breve descripción de sus cambios, que más tarde se utilizará para generar registros de cambios de lanzamiento. - -Las pruebas comenzarán tan pronto como los empleados de Yandex etiqueten su PR con una etiqueta “can be tested”. The results of some first checks (e.g. code style) will come in within several minutes. Build check results will arrive within half an hour. And the main set of tests will report itself within an hour. - -El sistema preparará compilaciones binarias ClickHouse para su solicitud de extracción individualmente. Para recuperar estas compilaciones, haga clic en “Details” junto al link “ClickHouse build check” en la lista de cheques. Allí encontrará enlaces directos a la construcción.deb paquetes de ClickHouse que puede implementar incluso en sus servidores de producción (si no tiene miedo). - -Lo más probable es que algunas de las compilaciones fallen las primeras veces. Esto se debe al hecho de que verificamos las compilaciones tanto con gcc como con clang, con casi todas las advertencias existentes (siempre con el `-Werror` bandera) habilitado para sonido. En esa misma página, puede encontrar todos los registros de compilación para que no tenga que compilar ClickHouse de todas las formas posibles. 
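Como comprobación rápida de que los datos de prueba de la sección anterior se cargaron por completo antes de usarlos en pruebas de rendimiento (las cifras exactas dependen del conjunto de datos descargado):

``` bash
clickhouse-client --query "SELECT count() FROM test.hits"
clickhouse-client --query "SELECT count() FROM test.visits"
```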
diff --git a/docs/es/development/index.md b/docs/es/development/index.md deleted file mode 100644 index 6f96f9b3f02..00000000000 --- a/docs/es/development/index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_folder_title: Desarrollo -toc_hidden: true -toc_priority: 58 -toc_title: oculto ---- - -# Desarrollo de ClickHouse {#clickhouse-development} - -[Artículo Original](https://clickhouse.tech/docs/en/development/) diff --git a/docs/es/development/style.md b/docs/es/development/style.md deleted file mode 100644 index ec55516fe2c..00000000000 --- a/docs/es/development/style.md +++ /dev/null @@ -1,841 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 68 -toc_title: "C\xF3mo escribir c\xF3digo C ++" ---- - -# Cómo escribir código C ++ {#how-to-write-c-code} - -## Recomendaciones generales {#general-recommendations} - -**1.** Las siguientes son recomendaciones, no requisitos. - -**2.** Si está editando código, tiene sentido seguir el formato del código existente. - -**3.** El estilo de código es necesario para la coherencia. La consistencia facilita la lectura del código y también facilita la búsqueda del código. - -**4.** Muchas de las reglas no tienen razones lógicas; están dictadas por prácticas establecidas. - -## Formatear {#formatting} - -**1.** La mayor parte del formato se realizará automáticamente por `clang-format`. - -**2.** Las sangrías son 4 espacios. Configure el entorno de desarrollo para que una pestaña agregue cuatro espacios. - -**3.** Abrir y cerrar llaves deben estar en una línea separada. - -``` cpp -inline void readBoolText(bool & x, ReadBuffer & buf) -{ - char tmp = '0'; - readChar(tmp, buf); - x = tmp != '0'; -} -``` - -**4.** Si todo el cuerpo de la función es `statement`, se puede colocar en una sola línea. Coloque espacios alrededor de llaves (además del espacio al final de la línea). - -``` cpp -inline size_t mask() const { return buf_size() - 1; } -inline size_t place(HashValue x) const { return x & mask(); } -``` - -**5.** Para funciones. No coloque espacios alrededor de los corchetes. - -``` cpp -void reinsert(const Value & x) -``` - -``` cpp -memcpy(&buf[place_value], &x, sizeof(x)); -``` - -**6.** En `if`, `for`, `while` y otras expresiones, se inserta un espacio delante del corchete de apertura (a diferencia de las llamadas a funciones). - -``` cpp -for (size_t i = 0; i < rows; i += storage.index_granularity) -``` - -**7.** Agregar espacios alrededor de los operadores binarios (`+`, `-`, `*`, `/`, `%`, …) and the ternary operator `?:`. - -``` cpp -UInt16 year = (s[0] - '0') * 1000 + (s[1] - '0') * 100 + (s[2] - '0') * 10 + (s[3] - '0'); -UInt8 month = (s[5] - '0') * 10 + (s[6] - '0'); -UInt8 day = (s[8] - '0') * 10 + (s[9] - '0'); -``` - -**8.** Si se introduce un avance de línea, coloque al operador en una nueva línea y aumente la sangría antes de ella. - -``` cpp -if (elapsed_ns) - message << " (" - << rows_read_on_server * 1000000000 / elapsed_ns << " rows/s., " - << bytes_read_on_server * 1000.0 / elapsed_ns << " MB/s.) "; -``` - -**9.** Puede utilizar espacios para la alineación dentro de una línea, si lo desea. - -``` cpp -dst.ClickLogID = click.LogID; -dst.ClickEventID = click.EventID; -dst.ClickGoodEvent = click.GoodEvent; -``` - -**10.** No use espacios alrededor de los operadores `.`, `->`. - -Si es necesario, el operador se puede envolver a la siguiente línea. 
En este caso, el desplazamiento frente a él aumenta. - -**11.** No utilice un espacio para separar los operadores unarios (`--`, `++`, `*`, `&`, …) from the argument. - -**12.** Pon un espacio después de una coma, pero no antes. La misma regla se aplica a un punto y coma dentro de un `for` expresion. - -**13.** No utilice espacios para separar el `[]` operador. - -**14.** En un `template <...>` expresión, use un espacio entre `template` y `<`; sin espacios después de `<` o antes `>`. - -``` cpp -template -struct AggregatedStatElement -{} -``` - -**15.** En clases y estructuras, escribe `public`, `private`, y `protected` en el mismo nivel que `class/struct`, y sangrar el resto del código. - -``` cpp -template -class MultiVersion -{ -public: - /// Version of object for usage. shared_ptr manage lifetime of version. - using Version = std::shared_ptr; - ... -} -``` - -**16.** Si el mismo `namespace` se usa para todo el archivo, y no hay nada más significativo, no es necesario un desplazamiento dentro `namespace`. - -**17.** Si el bloque para un `if`, `for`, `while`, u otra expresión consiste en una sola `statement`, las llaves son opcionales. Coloque el `statement` en una línea separada, en su lugar. Esta regla también es válida para `if`, `for`, `while`, … - -Pero si el interior `statement` contiene llaves o `else`, el bloque externo debe escribirse entre llaves. - -``` cpp -/// Finish write. -for (auto & stream : streams) - stream.second->finalize(); -``` - -**18.** No debería haber espacios al final de las líneas. - -**19.** Los archivos de origen están codificados en UTF-8. - -**20.** Los caracteres no ASCII se pueden usar en literales de cadena. - -``` cpp -<< ", " << (timer.elapsed() / chunks_stats.hits) << " μsec/hit."; -``` - -**21.** No escriba varias expresiones en una sola línea. - -**22.** Agrupe secciones de código dentro de las funciones y sepárelas con no más de una línea vacía. - -**23.** Separe funciones, clases, etc. con una o dos líneas vacías. - -**24.** `A const` (relacionado con un valor) debe escribirse antes del nombre del tipo. - -``` cpp -//correct -const char * pos -const std::string & s -//incorrect -char const * pos -``` - -**25.** Al declarar un puntero o referencia, el `*` y `&` Los símbolos deben estar separados por espacios en ambos lados. - -``` cpp -//correct -const char * pos -//incorrect -const char* pos -const char *pos -``` - -**26.** Cuando utilice tipos de plantilla, alias con el `using` palabra clave (excepto en los casos más simples). - -En otras palabras, los parámetros de la plantilla se especifican solo en `using` y no se repiten en el código. - -`using` se puede declarar localmente, como dentro de una función. - -``` cpp -//correct -using FileStreams = std::map>; -FileStreams streams; -//incorrect -std::map> streams; -``` - -**27.** No declare varias variables de diferentes tipos en una instrucción. - -``` cpp -//incorrect -int x, *y; -``` - -**28.** No utilice moldes de estilo C. - -``` cpp -//incorrect -std::cerr << (int)c <<; std::endl; -//correct -std::cerr << static_cast(c) << std::endl; -``` - -**29.** En clases y estructuras, los miembros del grupo y las funciones por separado dentro de cada ámbito de visibilidad. - -**30.** Para clases y estructuras pequeñas, no es necesario separar la declaración del método de la implementación. - -Lo mismo es cierto para los métodos pequeños en cualquier clase o estructura. 
- -Para clases y estructuras con plantillas, no separe las declaraciones de métodos de la implementación (porque de lo contrario deben definirse en la misma unidad de traducción). - -**31.** Puede ajustar líneas en 140 caracteres, en lugar de 80. - -**32.** Utilice siempre los operadores de incremento / decremento de prefijo si no se requiere postfix. - -``` cpp -for (Names::const_iterator it = column_names.begin(); it != column_names.end(); ++it) -``` - -## Comentario {#comments} - -**1.** Asegúrese de agregar comentarios para todas las partes no triviales del código. - -Esto es muy importante. Escribir el comentario puede ayudarte a darte cuenta de que el código no es necesario o que está diseñado incorrectamente. - -``` cpp -/** Part of piece of memory, that can be used. - * For example, if internal_buffer is 1MB, and there was only 10 bytes loaded to buffer from file for reading, - * then working_buffer will have size of only 10 bytes - * (working_buffer.end() will point to position right after those 10 bytes available for read). - */ -``` - -**2.** Los comentarios pueden ser tan detallados como sea necesario. - -**3.** Coloque comentarios antes del código que describen. En casos raros, los comentarios pueden aparecer después del código, en la misma línea. - -``` cpp -/** Parses and executes the query. -*/ -void executeQuery( - ReadBuffer & istr, /// Where to read the query from (and data for INSERT, if applicable) - WriteBuffer & ostr, /// Where to write the result - Context & context, /// DB, tables, data types, engines, functions, aggregate functions... - BlockInputStreamPtr & query_plan, /// Here could be written the description on how query was executed - QueryProcessingStage::Enum stage = QueryProcessingStage::Complete /// Up to which stage process the SELECT query - ) -``` - -**4.** Los comentarios deben escribirse en inglés solamente. - -**5.** Si está escribiendo una biblioteca, incluya comentarios detallados que la expliquen en el archivo de encabezado principal. - -**6.** No agregue comentarios que no proporcionen información adicional. En particular, no deje comentarios vacíos como este: - -``` cpp -/* -* Procedure Name: -* Original procedure name: -* Author: -* Date of creation: -* Dates of modification: -* Modification authors: -* Original file name: -* Purpose: -* Intent: -* Designation: -* Classes used: -* Constants: -* Local variables: -* Parameters: -* Date of creation: -* Purpose: -*/ -``` - -El ejemplo se toma prestado del recurso http://home.tamk.fi/~jaalto/course/coding-style/doc/unmaintainable-code/. - -**7.** No escriba comentarios de basura (autor, fecha de creación ..) al principio de cada archivo. - -**8.** Los comentarios de una sola línea comienzan con tres barras: `///` y los comentarios de varias líneas comienzan con `/**`. Estos comentarios son considerados “documentation”. - -Nota: Puede usar Doxygen para generar documentación a partir de estos comentarios. Pero Doxygen no se usa generalmente porque es más conveniente navegar por el código en el IDE. - -**9.** Los comentarios de varias líneas no deben tener líneas vacías al principio y al final (excepto la línea que cierra un comentario de varias líneas). - -**10.** Para comentar el código, use comentarios básicos, no “documenting” comentario. - -**11.** Elimine las partes comentadas del código antes de confirmar. - -**12.** No use blasfemias en comentarios o código. - -**13.** No use letras mayúsculas. No use puntuación excesiva. - -``` cpp -/// WHAT THE FAIL??? 
-``` - -**14.** No use comentarios para hacer delímetros. - -``` cpp -///****************************************************** -``` - -**15.** No comiencen las discusiones en los comentarios. - -``` cpp -/// Why did you do this stuff? -``` - -**16.** No es necesario escribir un comentario al final de un bloque que describa de qué se trataba. - -``` cpp -/// for -``` - -## Nombre {#names} - -**1.** Use letras minúsculas con guiones bajos en los nombres de variables y miembros de clase. - -``` cpp -size_t max_block_size; -``` - -**2.** Para los nombres de las funciones (métodos), use camelCase comenzando con una letra minúscula. - -``` cpp -std::string getName() const override { return "Memory"; } -``` - -**3.** Para los nombres de las clases (estructuras), use CamelCase comenzando con una letra mayúscula. Los prefijos distintos de I no se usan para interfaces. - -``` cpp -class StorageMemory : public IStorage -``` - -**4.** `using` se nombran de la misma manera que las clases, o con `_t` al final. - -**5.** Nombres de argumentos de tipo de plantilla: en casos simples, use `T`; `T`, `U`; `T1`, `T2`. - -Para casos más complejos, siga las reglas para los nombres de clase o agregue el prefijo `T`. - -``` cpp -template -struct AggregatedStatElement -``` - -**6.** Nombres de argumentos constantes de plantilla: siga las reglas para los nombres de variables o use `N` en casos simples. - -``` cpp -template -struct ExtractDomain -``` - -**7.** Para clases abstractas (interfaces) puede agregar el `I` prefijo. - -``` cpp -class IBlockInputStream -``` - -**8.** Si usa una variable localmente, puede usar el nombre corto. - -En todos los demás casos, use un nombre que describa el significado. - -``` cpp -bool info_successfully_loaded = false; -``` - -**9.** Nombres de `define`s y las constantes globales usan ALL_CAPS con guiones bajos. - -``` cpp -#define MAX_SRC_TABLE_NAMES_TO_STORE 1000 -``` - -**10.** Los nombres de archivo deben usar el mismo estilo que su contenido. - -Si un archivo contiene una sola clase, nombre el archivo de la misma manera que la clase (CamelCase). - -Si el archivo contiene una sola función, nombre el archivo de la misma manera que la función (camelCase). - -**11.** Si el nombre contiene una abreviatura, : - -- Para los nombres de variables, la abreviatura debe usar letras minúsculas `mysql_connection` (ni `mySQL_connection`). -- Para los nombres de clases y funciones, mantenga las letras mayúsculas en la abreviatura`MySQLConnection` (ni `MySqlConnection`). - -**12.** Los argumentos del constructor que se usan solo para inicializar los miembros de la clase deben nombrarse de la misma manera que los miembros de la clase, pero con un guión bajo al final. - -``` cpp -FileQueueProcessor( - const std::string & path_, - const std::string & prefix_, - std::shared_ptr handler_) - : path(path_), - prefix(prefix_), - handler(handler_), - log(&Logger::get("FileQueueProcessor")) -{ -} -``` - -El sufijo de subrayado se puede omitir si el argumento no se usa en el cuerpo del constructor. - -**13.** No hay diferencia en los nombres de las variables locales y los miembros de la clase (no se requieren prefijos). - -``` cpp -timer (not m_timer) -``` - -**14.** Para las constantes en un `enum`, usar CamelCase con una letra mayúscula. ALL_CAPS también es aceptable. Si el `enum` no es local, utilice un `enum class`. - -``` cpp -enum class CompressionMethod -{ - QuickLZ = 0, - LZ4 = 1, -}; -``` - -**15.** Todos los nombres deben estar en inglés. La transliteración de palabras rusas no está permitida. 
- - not Stroka - -**16.** Las abreviaturas son aceptables si son bien conocidas (cuando puede encontrar fácilmente el significado de la abreviatura en Wikipedia o en un motor de búsqueda). - - `AST`, `SQL`. - - Not `NVDH` (some random letters) - -Las palabras incompletas son aceptables si la versión abreviada es de uso común. - -También puede usar una abreviatura si el nombre completo se incluye junto a él en los comentarios. - -**17.** Los nombres de archivo con código fuente de C++ deben tener `.cpp` ampliación. Los archivos de encabezado deben tener `.h` ampliación. - -## Cómo escribir código {#how-to-write-code} - -**1.** Gestión de la memoria. - -Desasignación de memoria manual (`delete`) solo se puede usar en el código de la biblioteca. - -En el código de la biblioteca, el `delete` operador sólo se puede utilizar en destructores. - -En el código de la aplicación, la memoria debe ser liberada por el objeto que la posee. - -Ejemplos: - -- La forma más fácil es colocar un objeto en la pila o convertirlo en miembro de otra clase. -- Para una gran cantidad de objetos pequeños, use contenedores. -- Para la desasignación automática de un pequeño número de objetos que residen en el montón, use `shared_ptr/unique_ptr`. - -**2.** Gestión de recursos. - -Utilizar `RAII` y ver arriba. - -**3.** Manejo de errores. - -Utilice excepciones. En la mayoría de los casos, solo necesita lanzar una excepción y no necesita atraparla (debido a `RAII`). - -En las aplicaciones de procesamiento de datos fuera de línea, a menudo es aceptable no detectar excepciones. - -En los servidores que manejan las solicitudes de los usuarios, generalmente es suficiente detectar excepciones en el nivel superior del controlador de conexión. - -En las funciones de subproceso, debe capturar y mantener todas las excepciones para volver a lanzarlas en el subproceso principal después `join`. - -``` cpp -/// If there weren't any calculations yet, calculate the first block synchronously -if (!started) -{ - calculate(); - started = true; -} -else /// If calculations are already in progress, wait for the result - pool.wait(); - -if (exception) - exception->rethrow(); -``` - -Nunca oculte excepciones sin manejo. Nunca simplemente ponga ciegamente todas las excepciones para iniciar sesión. - -``` cpp -//Not correct -catch (...) {} -``` - -Si necesita ignorar algunas excepciones, hágalo solo para las específicas y vuelva a lanzar el resto. - -``` cpp -catch (const DB::Exception & e) -{ - if (e.code() == ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION) - return nullptr; - else - throw; -} -``` - -Al usar funciones con códigos de respuesta o `errno`, siempre verifique el resultado y arroje una excepción en caso de error. - -``` cpp -if (0 != close(fd)) - throwFromErrno("Cannot close file " + file_name, ErrorCodes::CANNOT_CLOSE_FILE); -``` - -`Do not use assert`. - -**4.** Tipos de excepción. - -No es necesario utilizar una jerarquía de excepciones compleja en el código de la aplicación. El texto de excepción debe ser comprensible para un administrador del sistema. - -**5.** Lanzar excepciones de destructores. - -Esto no es recomendable, pero está permitido. - -Utilice las siguientes opciones: - -- Crear una función (`done()` o `finalize()`) que hará todo el trabajo de antemano que podría conducir a una excepción. Si se llamó a esa función, no debería haber excepciones en el destructor más adelante. 
-- Las tareas que son demasiado complejas (como enviar mensajes a través de la red) se pueden poner en un método separado al que el usuario de la clase tendrá que llamar antes de la destrucción. -- Si hay una excepción en el destructor, es mejor registrarla que ocultarla (si el registrador está disponible). -- En aplicaciones simples, es aceptable confiar en `std::terminate` (para los casos de `noexcept` de forma predeterminada en C ++ 11) para manejar excepciones. - -**6.** Bloques de código anónimos. - -Puede crear un bloque de código separado dentro de una sola función para hacer que ciertas variables sean locales, de modo que se llame a los destructores al salir del bloque. - -``` cpp -Block block = data.in->read(); - -{ - std::lock_guard lock(mutex); - data.ready = true; - data.block = block; -} - -ready_any.set(); -``` - -**7.** Multithreading. - -En programas de procesamiento de datos fuera de línea: - -- Trate de obtener el mejor rendimiento posible en un solo núcleo de CPU. A continuación, puede paralelizar su código si es necesario. - -En aplicaciones de servidor: - -- Utilice el grupo de subprocesos para procesar solicitudes. En este punto, no hemos tenido ninguna tarea que requiera el cambio de contexto de espacio de usuario. - -La horquilla no se usa para la paralelización. - -**8.** Sincronización de hilos. - -A menudo es posible hacer que diferentes hilos usen diferentes celdas de memoria (incluso mejor: diferentes líneas de caché) y no usar ninguna sincronización de hilos (excepto `joinAll`). - -Si se requiere sincronización, en la mayoría de los casos, es suficiente usar mutex bajo `lock_guard`. - -En otros casos, use primitivas de sincronización del sistema. No utilice la espera ocupada. - -Las operaciones atómicas deben usarse solo en los casos más simples. - -No intente implementar estructuras de datos sin bloqueo a menos que sea su principal área de especialización. - -**9.** Punteros vs referencias. - -En la mayoría de los casos, prefiera referencias. - -**10.** Construir. - -Usar referencias constantes, punteros a constantes, `const_iterator`, y métodos const. - -Considerar `const` para ser predeterminado y usar no-`const` sólo cuando sea necesario. - -Al pasar variables por valor, usando `const` por lo general no tiene sentido. - -**11.** sin firmar. - -Utilizar `unsigned` si es necesario. - -**12.** Tipos numéricos. - -Utilice los tipos `UInt8`, `UInt16`, `UInt32`, `UInt64`, `Int8`, `Int16`, `Int32`, y `Int64`, así como `size_t`, `ssize_t`, y `ptrdiff_t`. - -No use estos tipos para números: `signed/unsigned long`, `long long`, `short`, `signed/unsigned char`, `char`. - -**13.** Pasando argumentos. - -Pasar valores complejos por referencia (incluyendo `std::string`). - -Si una función captura la propiedad de un objeto creado en el montón, cree el tipo de argumento `shared_ptr` o `unique_ptr`. - -**14.** Valores devueltos. - -En la mayoría de los casos, sólo tiene que utilizar `return`. No escribir `return std::move(res)`. - -Si la función asigna un objeto en el montón y lo devuelve, use `shared_ptr` o `unique_ptr`. - -En casos excepcionales, es posible que deba devolver el valor a través de un argumento. En este caso, el argumento debe ser una referencia. - -``` cpp -using AggregateFunctionPtr = std::shared_ptr; - -/** Allows creating an aggregate function by its name. 
-  */
-class AggregateFunctionFactory
-{
-public:
-    AggregateFunctionFactory();
-    AggregateFunctionPtr get(const String & name, const DataTypes & argument_types) const;
-```
-
-**15.** namespace.
-
-There is no need to use a separate `namespace` for application code.
-
-Small libraries do not need this either.
-
-For medium to large libraries, put everything in a `namespace`.
-
-In the library's `.h` file, you can use `namespace detail` to hide implementation details not needed for the application code.
-
-In a `.cpp` file, you can use a `static` or anonymous namespace to hide symbols.
-
-Also, a `namespace` can be used for an `enum` to prevent the corresponding names from falling into an external `namespace` (but it is better to use an `enum class`).
-
-**16.** Deferred initialization.
-
-If arguments are required for initialization, then you normally should not write a default constructor.
-
-If later you will need to delay initialization, you can add a default constructor that will create an invalid object. Or, for a small number of objects, you can use `shared_ptr/unique_ptr`.
-
-``` cpp
-Loader(DB::Connection * connection_, const std::string & query, size_t max_block_size_);
-
-/// For deferred initialization
-Loader() {}
-```
-
-**17.** Virtual functions.
-
-If the class is not intended for polymorphic use, you do not need to make functions virtual. This also applies to the destructor.
-
-**18.** Encodings.
-
-Use UTF-8 everywhere. Use `std::string` and `char *`. Do not use `std::wstring` and `wchar_t`.
-
-**19.** Logging.
-
-See the examples everywhere in the code.
-
-Before committing, delete all meaningless and debug logging, and any other types of debug output.
-
-Logging in loops should be avoided, even on the Trace level.
-
-Logs must be readable at any logging level.
-
-Logging should only be used in application code, for the most part.
-
-Log messages must be written in English.
-
-The log should preferably be understandable for the system administrator.
-
-Do not use profanity in the log.
-
-Use UTF-8 encoding in the log. In rare cases you can use non-ASCII characters in the log.
-
-**20.** Input-output.
-
-Do not use `iostreams` in internal loops that are critical for application performance (and never use `stringstream`).
-
-Use the `DB/IO` library instead.
-
-**21.** Date and time.
-
-See the `DateLUT` library.
-
-**22.** include.
-
-Always use `#pragma once` instead of include guards.
-
-**23.** using.
-
-`using namespace` is not used. You can use `using` with something specific. But make it local inside a class or function.
-
-**24.** Do not use `trailing return type` for functions unless necessary.
-
-``` cpp
-auto f() -> void
-```
-
-**25.** Declaration and initialization of variables.
-
-``` cpp
-//right way
-std::string s = "Hello";
-std::string s{"Hello"};
-
-//wrong way
-auto s = std::string{"Hello"};
-```
-
-**26.** For virtual functions, write `virtual` in the base class, but write `override` instead of `virtual` in descendant classes.
-
-## Unused Features of C++ {#unused-features-of-c}
-
-**1.** Virtual inheritance is not used.
-
-**2.** Exception specifiers from C++03 are not used.
-
-## Platform {#platform}
-
-**1.** We write code for a specific platform.
-
-But other things being equal, cross-platform or portable code is preferred.
-
-**2.** Language: C++20.
-
-**3.** Compiler: `gcc`. At this time (August 2020), the code is compiled using version 9.3. (It can also be compiled using `clang 8`.)
-
-The standard library is used (`libc++`).
-
-**4.** OS: Linux Ubuntu, not older than Precise.
-
-**5.** Code is written for the x86_64 CPU architecture.
-
-The CPU instruction set is the minimum supported set among our servers. Currently, it is SSE 4.2.
-
-**6.** Use the `-Wall -Wextra -Werror` compilation flags.
-
-**7.** Use static linking with all libraries except those that are difficult to link to statically (see the output of the `ldd` command).
-
-**8.** Code is developed and debugged with release settings.
-
-## Tools {#tools}
-
-**1.** KDevelop is a good IDE.
-
-**2.** For debugging, use `gdb`, `valgrind` (`memcheck`), `strace`, `-fsanitize=...`, or `tcmalloc_minimal_debug`.
-
-**3.** For profiling, use `Linux Perf`, `valgrind` (`callgrind`), or `strace -cf`.
-
-**4.** Sources are in Git.
-
-**5.** Builds use `CMake`.
-
-**6.** Programs are released using `deb` packages.
-
-**7.** Commits to master must not break the build.
-
-Though only selected revisions are considered workable.
-
-**8.** Make commits as often as possible, even if the code is only partially ready.
-
-Use branches for this purpose.
-
-If your code in the `master` branch is not buildable yet, exclude it from the build before the `push`. You will need to finish it or remove it within a few days.
-
-**9.** For non-trivial changes, use branches and publish them on the server.
-
-**10.** Unused code is removed from the repository.
-
-## Libraries {#libraries}
-
-**1.** The C++20 standard library is used (experimental extensions are allowed), as well as the `boost` and `Poco` frameworks.
-
-**2.** If necessary, you can use any well-known libraries available in the OS package.
-
-If there is a good solution already available, then use it, even if it means you have to install another library.
-
-(But be prepared to remove bad libraries from code.)
-
-**3.** You can install a library that is not in the packages if the packages do not have what you need, or have an outdated version or the wrong type of compilation.
-
-**4.** If the library is small and does not have its own complex build system, put the source files in the `contrib` folder.
-
-**5.** Preference is always given to libraries that are already in use.
-
-## General Recommendations {#general-recommendations-1}
-
-**1.** Write as little code as possible.
-
-**2.** Try the simplest solution.
-
-**3.** Do not write code until you know how it is going to work and how the inner loop will function.
-
-**4.** In the simplest cases, use `using` instead of classes or structs.
-
-**5.** If possible, do not write copy constructors, assignment operators, destructors (other than a virtual one, if the class contains at least one virtual function), move constructors, or move assignment operators. In other words, the compiler-generated functions must work correctly. You can use `default`.
-
-**6.** Code simplification is encouraged.
Reduce the size of your code wherever possible.
-
-## Additional Recommendations {#additional-recommendations}
-
-**1.** Explicitly specifying `std::` for types from `stddef.h`
-
-is not recommended. In other words, we recommend writing `size_t` instead of `std::size_t`, because it is shorter.
-
-It is acceptable to add `std::`.
-
-**2.** Explicitly specifying `std::` for functions from the standard C library
-
-is not recommended. In other words, write `memcpy` instead of `std::memcpy`.
-
-The reason is that there are similar non-standard functions, such as `memmem`. We do use these functions on occasion. These functions do not exist in `namespace std`.
-
-If you write `std::memcpy` instead of `memcpy` everywhere, then `memmem` without `std::` will look strange.
-
-Nevertheless, you can still use `std::` if you prefer it.
-
-**3.** Using functions from C when the same ones are available in the standard C++ library.
-
-This is acceptable if it is more efficient.
-
-For example, use `memcpy` instead of `std::copy` for copying large chunks of memory.
-
-**4.** Multiline function arguments.
-
-Any of the following wrapping styles are allowed:
-
-``` cpp
-function(
-  T1 x1,
-  T2 x2)
-```
-
-``` cpp
-function(
-  size_t left, size_t right,
-  const RangesInDataParts & ranges,
-  size_t limit)
-```
-
-``` cpp
-function(size_t left, size_t right,
-  const RangesInDataParts & ranges,
-  size_t limit)
-```
-
-``` cpp
-function(size_t left, size_t right,
-      const RangesInDataParts & ranges,
-      size_t limit)
-```
-
-``` cpp
-function(
-      size_t left,
-      size_t right,
-      const RangesInDataParts & ranges,
-      size_t limit)
-```
-
-[Original article](https://clickhouse.tech/docs/en/development/style/)
diff --git a/docs/es/development/tests.md b/docs/es/development/tests.md
deleted file mode 120000
index c03d36c3916..00000000000
--- a/docs/es/development/tests.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/development/tests.md
\ No newline at end of file
diff --git a/docs/es/engines/database-engines/atomic.md b/docs/es/engines/database-engines/atomic.md
deleted file mode 100644
index f019b94a00b..00000000000
--- a/docs/es/engines/database-engines/atomic.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-toc_priority: 32
-toc_title: Atomic
----
-
-
-# Atomic {#atomic}
-
-It supports non-blocking `DROP` and `RENAME TABLE` queries and atomic `EXCHANGE TABLES t1 AND t2` queries. The Atomic database engine is used by default.
-
-## Creating a Database {#creating-a-database}
-
-```sql
-CREATE DATABASE test ENGINE = Atomic;
-```
-
-[Original article](https://clickhouse.tech/docs/en/engines/database_engines/atomic/)
diff --git a/docs/es/engines/database-engines/index.md b/docs/es/engines/database-engines/index.md
deleted file mode 100644
index 8784b9bd02b..00000000000
--- a/docs/es/engines/database-engines/index.md
+++ /dev/null
@@ -1,21 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_folder_title: Database Engines
-toc_priority: 27
-toc_title: Introduction
----
-
-# Database Engines {#database-engines}
-
-Database engines allow you to work with tables.
-
-By default, ClickHouse uses its native database engine, which provides configurable [table engines](../../engines/table-engines/index.md) and an [SQL dialect](../../sql-reference/syntax.md).
-
-You can also use the following database engines:
-
-- [MySQL](mysql.md)
-
-- [Lazy](lazy.md)
-
-[Original article](https://clickhouse.tech/docs/en/database_engines/)
diff --git a/docs/es/engines/database-engines/lazy.md b/docs/es/engines/database-engines/lazy.md
deleted file mode 100644
index 0988c4cb395..00000000000
--- a/docs/es/engines/database-engines/lazy.md
+++ /dev/null
@@ -1,18 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 31
-toc_title: Lazy
----
-
-# Lazy {#lazy}
-
-Keeps tables in RAM only for `expiration_time_in_seconds` seconds after the last access. Can be used only with \*Log tables.
-
-It is optimized for storing many small \*Log tables, for which there is a long time interval between accesses.
-
-## Creating a Database {#creating-a-database}
-
-    CREATE DATABASE testlazy ENGINE = Lazy(expiration_time_in_seconds);
-
-[Original article](https://clickhouse.tech/docs/en/database_engines/lazy/)
diff --git a/docs/es/engines/database-engines/mysql.md b/docs/es/engines/database-engines/mysql.md
deleted file mode 100644
index 5f1dec97f35..00000000000
--- a/docs/es/engines/database-engines/mysql.md
+++ /dev/null
@@ -1,135 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 30
-toc_title: MySQL
----
-
-# MySQL {#mysql}
-
-Allows connecting to databases on a remote MySQL server and performing `INSERT` and `SELECT` queries to exchange data between ClickHouse and MySQL.
-
-The `MySQL` database engine translates queries to the MySQL server, so you can perform operations such as `SHOW TABLES` or `SHOW CREATE TABLE`.
-
-You cannot perform the following queries:
-
-- `RENAME`
-- `CREATE TABLE`
-- `ALTER`
-
-## Creating a Database {#creating-a-database}
-
-``` sql
-CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster]
-ENGINE = MySQL('host:port', ['database' | database], 'user', 'password')
-```
-
-**Engine Parameters**
-
-- `host:port` — MySQL server address.
-- `database` — Remote database name.
-- `user` — MySQL user.
-- `password` — User password.
-
-## Data Types Support {#data_types-support}
-
-| MySQL                            | ClickHouse                                                   |
-|----------------------------------|--------------------------------------------------------------|
-| UNSIGNED TINYINT                 | [UInt8](../../sql-reference/data-types/int-uint.md)          |
-| TINYINT                          | [Int8](../../sql-reference/data-types/int-uint.md)           |
-| UNSIGNED SMALLINT                | [UInt16](../../sql-reference/data-types/int-uint.md)         |
-| SMALLINT                         | [Int16](../../sql-reference/data-types/int-uint.md)          |
-| UNSIGNED INT, UNSIGNED MEDIUMINT | [UInt32](../../sql-reference/data-types/int-uint.md)         |
-| INT, MEDIUMINT                   | [Int32](../../sql-reference/data-types/int-uint.md)          |
-| UNSIGNED BIGINT                  | [UInt64](../../sql-reference/data-types/int-uint.md)         |
-| BIGINT                           | [Int64](../../sql-reference/data-types/int-uint.md)          |
-| FLOAT                            | [Float32](../../sql-reference/data-types/float.md)           |
-| DOUBLE                           | [Float64](../../sql-reference/data-types/float.md)           |
-| DATE                             | [Date](../../sql-reference/data-types/date.md)               |
-| DATETIME, TIMESTAMP              | [DateTime](../../sql-reference/data-types/datetime.md)       |
-| BINARY                           | [FixedString](../../sql-reference/data-types/fixedstring.md) |
-
-All other MySQL data types are converted into [String](../../sql-reference/data-types/string.md).
-
-[Nullable](../../sql-reference/data-types/nullable.md) is supported.
-
-## Examples of Use {#examples-of-use}
-
-Table in MySQL:
-
-``` text
-mysql> USE test;
-Database changed
-
-mysql> CREATE TABLE `mysql_table` (
-    ->   `int_id` INT NOT NULL AUTO_INCREMENT,
-    ->   `float` FLOAT NOT NULL,
-    ->   PRIMARY KEY (`int_id`));
-Query OK, 0 rows affected (0,09 sec)
-
-mysql> insert into mysql_table (`int_id`, `float`) VALUES (1,2);
-Query OK, 1 row affected (0,00 sec)
-
-mysql> select * from mysql_table;
-+--------+-------+
-| int_id | float |
-+--------+-------+
-|      1 |     2 |
-+--------+-------+
-1 row in set (0,00 sec)
-```
-
-Database in ClickHouse, exchanging data with the MySQL server:
-
-``` sql
-CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password')
-```
-
-``` sql
-SHOW DATABASES
-```
-
-``` text
-┌─name─────┐
-│ default  │
-│ mysql_db │
-│ system   │
-└──────────┘
-```
-
-``` sql
-SHOW TABLES FROM mysql_db
-```
-
-``` text
-┌─name─────────┐
-│  mysql_table │
-└──────────────┘
-```
-
-``` sql
-SELECT * FROM mysql_db.mysql_table
-```
-
-``` text
-┌─int_id─┬─float─┐
-│      1 │     2 │
-└────────┴───────┘
-```
-
-``` sql
-INSERT INTO mysql_db.mysql_table VALUES (3,4)
-```
-
-``` sql
-SELECT * FROM mysql_db.mysql_table
-```
-
-``` text
-┌─int_id─┬─float─┐
-│      1 │     2 │
-│      3 │     4 │
-└────────┴───────┘
-```
-
-[Original article](https://clickhouse.tech/docs/en/database_engines/mysql/)
diff --git a/docs/es/engines/index.md b/docs/es/engines/index.md
deleted file mode 100644
index 03e4426dd8d..00000000000
--- a/docs/es/engines/index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_folder_title: Engines
-toc_priority: 25
----
-
-
diff --git a/docs/es/engines/table-engines/index.md b/docs/es/engines/table-engines/index.md
deleted file mode 100644
index 7be315e3ee3..00000000000
--- a/docs/es/engines/table-engines/index.md
+++ /dev/null
@@ -1,85 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_folder_title: Table Engines
-toc_priority: 26
-toc_title: Introduction
----
-
-# Table Engines {#table_engines}
-
-The table engine (type of table) determines:
-
-- How and where data is stored, where to write it to, and where to read it from.
-- Which queries are supported, and how.
-- Concurrent data access.
-- Use of indexes, if present.
-- Whether multithreaded request execution is possible.
-- Data replication parameters.
-
-## Engine Families {#engine-families}
-
-### MergeTree {#mergetree}
-
-The most universal and functional table engines for high-load tasks. The property shared by these engines is quick data insertion with subsequent background data processing. `MergeTree` family engines support data replication (with [Replicated\*](mergetree-family/replication.md#table_engines-replication) versions of engines), partitioning, and other features not supported in other engines.
-
-Engines in the family:
-
-- [MergeTree](mergetree-family/mergetree.md#mergetree)
-- [ReplacingMergeTree](mergetree-family/replacingmergetree.md#replacingmergetree)
-- [SummingMergeTree](mergetree-family/summingmergetree.md#summingmergetree)
-- [AggregatingMergeTree](mergetree-family/aggregatingmergetree.md#aggregatingmergetree)
-- [CollapsingMergeTree](mergetree-family/collapsingmergetree.md#table_engine-collapsingmergetree)
-- [VersionedCollapsingMergeTree](mergetree-family/versionedcollapsingmergetree.md#versionedcollapsingmergetree)
-- [GraphiteMergeTree](mergetree-family/graphitemergetree.md#graphitemergetree)
-
-### Log {#log}
-
-Lightweight [engines](log-family/index.md) with minimum functionality. They are the most effective when you need to quickly write many small tables (up to approximately 1 million rows) and read them later as a whole.
-
-Engines in the family:
-
-- [TinyLog](log-family/tinylog.md#tinylog)
-- [StripeLog](log-family/stripelog.md#stripelog)
-- [Log](log-family/log.md#log)
-
-### Integration Engines {#integration-engines}
-
-Engines for communicating with other data storage and processing systems.
-
-Engines in the family:
-
-- [Kafka](integrations/kafka.md#kafka)
-- [MySQL](integrations/mysql.md#mysql)
-- [ODBC](integrations/odbc.md#table-engine-odbc)
-- [JDBC](integrations/jdbc.md#table-engine-jdbc)
-- [HDFS](integrations/hdfs.md#hdfs)
-
-### Special Engines {#special-engines}
-
-Engines in the family:
-
-- [Distributed](special/distributed.md#distributed)
-- [MaterializedView](special/materializedview.md#materializedview)
-- [Dictionary](special/dictionary.md#dictionary)
-- [Merge](special/merge.md#merge)
-- [File](special/file.md#file)
-- [Null](special/null.md#null)
-- [Set](special/set.md#set)
-- [Join](special/join.md#join)
-- [URL](special/url.md#table_engines-url)
-- [View](special/view.md#table_engines-view)
-- [Memory](special/memory.md#memory)
-- [Buffer](special/buffer.md#buffer)
-
-## Virtual Columns {#table_engines-virtual_columns}
-
-A virtual column is an integral table engine attribute that is defined in the engine source code.
-
-You should not specify virtual columns in the `CREATE TABLE` query, and you cannot see them in `SHOW CREATE TABLE` and `DESCRIBE TABLE` query results. Virtual columns are also read-only, so you cannot insert data into virtual columns.
-
-To select data from a virtual column, you must specify its name in the `SELECT` query. `SELECT *` does not return values from virtual columns.
-
-If you create a table with a column that has the same name as one of the table's virtual columns, the virtual column becomes inaccessible. We do not recommend doing this. To help avoid conflicts, virtual column names are usually prefixed with an underscore.
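-
-A minimal sketch of the explicit-naming rule (the MergeTree table `t` and its data are hypothetical, not part of this page): the MergeTree virtual column `_part` holds the name of the data part each row comes from, and it is returned only when listed in the `SELECT`:
-
-``` sql
-/* `_part` is not returned by SELECT *; it must be named explicitly. */
-SELECT _part, count() AS rows
-FROM t
-GROUP BY _part;
-```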
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/)
diff --git a/docs/es/engines/table-engines/integrations/hdfs.md b/docs/es/engines/table-engines/integrations/hdfs.md
deleted file mode 100644
index 5e0211660f5..00000000000
--- a/docs/es/engines/table-engines/integrations/hdfs.md
+++ /dev/null
@@ -1,123 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 36
-toc_title: HDFS
----
-
-# HDFS {#table_engines-hdfs}
-
-This engine provides integration with the [Apache Hadoop](https://en.wikipedia.org/wiki/Apache_Hadoop) ecosystem by allowing data on [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html) to be managed via ClickHouse. This engine is similar
-to the [File](../special/file.md#table_engines-file) and [URL](../special/url.md#table_engines-url) engines, but provides Hadoop-specific features.
-
-## Usage {#usage}
-
-``` sql
-ENGINE = HDFS(URI, format)
-```
-
-The `URI` parameter is the whole file URI in HDFS.
-The `format` parameter specifies one of the available file formats. To perform
-`SELECT` queries, the format must be supported for input, and to perform
-`INSERT` queries – for output. The available formats are listed in the
-[Formats](../../../interfaces/formats.md#formats) section.
-The path part of `URI` may contain globs. In this case the table is read-only.
-
-**Example:**
-
-**1.** Set up the `hdfs_engine_table` table:
-
-``` sql
-CREATE TABLE hdfs_engine_table (name String, value UInt32) ENGINE=HDFS('hdfs://hdfs1:9000/other_storage', 'TSV')
-```
-
-**2.** Fill the file:
-
-``` sql
-INSERT INTO hdfs_engine_table VALUES ('one', 1), ('two', 2), ('three', 3)
-```
-
-**3.** Query the data:
-
-``` sql
-SELECT * FROM hdfs_engine_table LIMIT 2
-```
-
-``` text
-┌─name─┬─value─┐
-│ one  │     1 │
-│ two  │     2 │
-└──────┴───────┘
-```
-
-## Implementation Details {#implementation-details}
-
-- Reads and writes can be parallel.
-- Not supported:
-    - `ALTER` and `SELECT...SAMPLE` operations.
-    - Indexes.
-    - Replication.
-
-**Globs in the path**
-
-Multiple path components can have globs. To be processed, a file must exist and match the whole path pattern. The listing of files is determined during `SELECT` (not at `CREATE` time).
-
-- `*` — Substitutes any number of any characters except `/`, including the empty string.
-- `?` — Substitutes any single character.
-- `{some_string,another_string,yet_another_one}` — Substitutes any of the strings `'some_string', 'another_string', 'yet_another_one'`.
-- `{N..M}` — Substitutes any number in the range from N to M, including both borders.
-
-Constructions with `{}` are similar to the [remote](../../../sql-reference/table-functions/remote.md) table function.
-
-**Example**
-
-1. Suppose we have several files in TSV format with the following URIs on HDFS:
-
-- ‘hdfs://hdfs1:9000/some_dir/some_file_1’
-- ‘hdfs://hdfs1:9000/some_dir/some_file_2’
-- ‘hdfs://hdfs1:9000/some_dir/some_file_3’
-- ‘hdfs://hdfs1:9000/another_dir/some_file_1’
-- ‘hdfs://hdfs1:9000/another_dir/some_file_2’
-- ‘hdfs://hdfs1:9000/another_dir/some_file_3’
-
-1.
There are several ways to make a table consisting of all six files:
-
-
-
-``` sql
-CREATE TABLE table_with_range (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/some_file_{1..3}', 'TSV')
-```
-
-Another way:
-
-``` sql
-CREATE TABLE table_with_question_mark (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/some_file_?', 'TSV')
-```
-
-A table consisting of all files in both directories (all files should satisfy the format and schema described in the query):
-
-``` sql
-CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV')
-```
-
-!!! warning "Warning"
-    If the list of files contains number ranges with leading zeros, use the construction with braces for each digit separately, or use `?`.
-
-**Example**
-
-Create a table with files named `file000`, `file001`, … , `file999`:
-
-``` sql
-CREATE TABLE big_table (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV')
-```
-
-## Virtual Columns {#virtual-columns}
-
-- `_path` — Path to the file.
-- `_file` — Name of the file.
-
-**See Also**
-
-- [Virtual columns](../index.md#table_engines-virtual_columns)
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/hdfs/)
diff --git a/docs/es/engines/table-engines/integrations/index.md b/docs/es/engines/table-engines/integrations/index.md
deleted file mode 100644
index e57aaf88744..00000000000
--- a/docs/es/engines/table-engines/integrations/index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_folder_title: Integrations
-toc_priority: 30
----
-
-
diff --git a/docs/es/engines/table-engines/integrations/jdbc.md b/docs/es/engines/table-engines/integrations/jdbc.md
deleted file mode 100644
index fd3450cef7c..00000000000
--- a/docs/es/engines/table-engines/integrations/jdbc.md
+++ /dev/null
@@ -1,90 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 34
-toc_title: JDBC
----
-
-# JDBC {#table-engine-jdbc}
-
-Allows ClickHouse to connect to external databases via [JDBC](https://en.wikipedia.org/wiki/Java_Database_Connectivity).
-
-To implement the JDBC connection, ClickHouse uses the separate program [clickhouse-jdbc-bridge](https://github.com/alex-krash/clickhouse-jdbc-bridge) that should run as a daemon.
-
-This engine supports the [Nullable](../../../sql-reference/data-types/nullable.md) data type.
-
-## Creating a Table {#creating-a-table}
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name
-(
-    columns list...
-)
-ENGINE = JDBC(dbms_uri, external_database, external_table)
-```
-
-**Engine Parameters**
-
-- `dbms_uri` — URI of an external DBMS.
-
-    Format: `jdbc:<driver_name>://<host_name>:<port>/?user=<username>&password=<password>`.
-    Example for MySQL: `jdbc:mysql://localhost:3306/?user=root&password=root`.
-
-- `external_database` — Database in an external DBMS.
-
-- `external_table` — Name of the table in `external_database`.
-
-## Usage Example {#usage-example}
-
-Create a table on the MySQL server by connecting directly with its console client:
-
-``` text
-mysql> CREATE TABLE `test`.`test` (
-    ->   `int_id` INT NOT NULL AUTO_INCREMENT,
-    ->   `int_nullable` INT NULL DEFAULT NULL,
-    ->   `float` FLOAT NOT NULL,
-    ->   `float_nullable` FLOAT NULL DEFAULT NULL,
-    ->   PRIMARY KEY (`int_id`));
-Query OK, 0 rows affected (0,09 sec)
-
-mysql> insert into test (`int_id`, `float`) VALUES (1,2);
-Query OK, 1 row affected (0,00 sec)
-
-mysql> select * from test;
-+--------+--------------+-------+----------------+
-| int_id | int_nullable | float | float_nullable |
-+--------+--------------+-------+----------------+
-|      1 |         NULL |     2 |           NULL |
-+--------+--------------+-------+----------------+
-1 row in set (0,00 sec)
-```
-
-Create a table on the ClickHouse server and select data from it:
-
-``` sql
-CREATE TABLE jdbc_table
-(
-    `int_id` Int32,
-    `int_nullable` Nullable(Int32),
-    `float` Float32,
-    `float_nullable` Nullable(Float32)
-)
-ENGINE JDBC('jdbc:mysql://localhost:3306/?user=root&password=root', 'test', 'test')
-```
-
-``` sql
-SELECT *
-FROM jdbc_table
-```
-
-``` text
-┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐
-│      1 │         ᴺᵁᴸᴸ │     2 │           ᴺᵁᴸᴸ │
-└────────┴──────────────┴───────┴────────────────┘
-```
-
-## See Also {#see-also}
-
-- [JDBC table function](../../../sql-reference/table-functions/jdbc.md).
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/jdbc/)
diff --git a/docs/es/engines/table-engines/integrations/kafka.md b/docs/es/engines/table-engines/integrations/kafka.md
deleted file mode 100644
index 54250aae82a..00000000000
--- a/docs/es/engines/table-engines/integrations/kafka.md
+++ /dev/null
@@ -1,180 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 32
-toc_title: Kafka
----
-
-# Kafka {#kafka}
-
-This engine works with [Apache Kafka](http://kafka.apache.org/).
-
-Kafka lets you:

-- Publish or subscribe to data flows.
-- Organize fault-tolerant storage.
-- Process streams as they become available.
-
-## Creating a Table {#table_engine-kafka-creating-a-table}
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-) ENGINE = Kafka()
-SETTINGS
-    kafka_broker_list = 'host:port',
-    kafka_topic_list = 'topic1,topic2,...',
-    kafka_group_name = 'group_name',
-    kafka_format = 'data_format'[,]
-    [kafka_row_delimiter = 'delimiter_symbol',]
-    [kafka_schema = '',]
-    [kafka_num_consumers = N,]
-    [kafka_max_block_size = 0,]
-    [kafka_skip_broken_messages = N,]
-    [kafka_commit_every_batch = 0]
-```
-
-Required parameters:
-
-- `kafka_broker_list` – A comma-separated list of brokers (for example, `localhost:9092`).
-- `kafka_topic_list` – A list of Kafka topics.
-- `kafka_group_name` – A group of Kafka consumers. Reading margins are tracked for each group separately. If you don't want messages to be duplicated in the cluster, use the same group name everywhere.
-- `kafka_format` – Message format. Uses the same notation as the SQL `FORMAT` function, such as `JSONEachRow`. For more information, see the [Formats](../../../interfaces/formats.md) section.
-
-Optional parameters:
-
-- `kafka_row_delimiter` – Delimiter character, which ends the message.
-- `kafka_schema` – Parameter that must be used if the format requires a schema definition.
For example, [Cap'n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object.
-- `kafka_num_consumers` – The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient. The total number of consumers must not exceed the number of partitions in the topic, since only one consumer can be assigned per partition.
-- `kafka_max_block_size` – The maximum batch size (in messages) for poll (default: `max_block_size`).
-- `kafka_skip_broken_messages` – Kafka message parser tolerance to schema-incompatible messages per block. Default: `0`. If `kafka_skip_broken_messages = N`, then the engine skips *N* Kafka messages that cannot be parsed (a message equals a row of data).
-- `kafka_commit_every_batch` – Commit every consumed and handled batch instead of a single commit after writing a whole block (default: `0`).
-
-Examples:
-
-``` sql
-  CREATE TABLE queue (
-    timestamp UInt64,
-    level String,
-    message String
-  ) ENGINE = Kafka('localhost:9092', 'topic', 'group1', 'JSONEachRow');
-
-  SELECT * FROM queue LIMIT 5;
-
-  CREATE TABLE queue2 (
-    timestamp UInt64,
-    level String,
-    message String
-  ) ENGINE = Kafka SETTINGS kafka_broker_list = 'localhost:9092',
-                            kafka_topic_list = 'topic',
-                            kafka_group_name = 'group1',
-                            kafka_format = 'JSONEachRow',
-                            kafka_num_consumers = 4;
-
-  CREATE TABLE queue3 (
-    timestamp UInt64,
-    level String,
-    message String
-  ) ENGINE = Kafka('localhost:9092', 'topic', 'group1')
-              SETTINGS kafka_format = 'JSONEachRow',
-                       kafka_num_consumers = 4;
-```
-
-<details markdown="1">
-
-<summary>Deprecated Method for Creating a Table</summary>
-
-!!! attention "Attention"
-    Do not use this method in new projects. If possible, switch old projects to the method described above.
-
-``` sql
-Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format
-      [, kafka_row_delimiter, kafka_schema, kafka_num_consumers, kafka_skip_broken_messages])
-```
-
-</details>
-
-## Description {#description}
-
-The delivered messages are tracked automatically, so each message in a group is only counted once. If you want to get the data twice, then create a copy of the table with another group name.
-
-Groups are flexible and synced on the cluster. For instance, if you have 10 topics and 5 copies of a table in a cluster, then each copy gets 2 topics. If the number of copies changes, the topics are redistributed across the copies automatically. Read more about this at http://kafka.apache.org/intro.
-
-`SELECT` is not particularly useful for reading messages (except for debugging), because each message can be read only once. It is more practical to create real-time threads using materialized views. To do this:
-
-1. Use the engine to create a Kafka consumer and consider it a data stream.
-2. Create a table with the desired structure.
-3. Create a materialized view that converts data from the engine and puts it into a previously created table.
-
-When the `MATERIALIZED VIEW` joins the engine, it starts collecting data in the background. This allows you to continually receive messages from Kafka and convert them to the required format using `SELECT`.
-One Kafka table can have as many materialized views as you like. They do not read data from the Kafka table directly, but receive new records (in blocks); this way you can write to several tables with different detail levels (with grouping - aggregation - and without).
-
-Example:
-
-``` sql
-  CREATE TABLE queue (
-    timestamp UInt64,
-    level String,
-    message String
-  ) ENGINE = Kafka('localhost:9092', 'topic', 'group1', 'JSONEachRow');
-
-  CREATE TABLE daily (
-    day Date,
-    level String,
-    total UInt64
-  ) ENGINE = SummingMergeTree(day, (day, level), 8192);
-
-  CREATE MATERIALIZED VIEW consumer TO daily
-    AS SELECT toDate(toDateTime(timestamp)) AS day, level, count() as total
-    FROM queue GROUP BY day, level;
-
-  SELECT level, sum(total) FROM daily GROUP BY level;
-```
-
-To improve performance, received messages are grouped into blocks the size of [max_insert_block_size](../../../operations/server-configuration-parameters/settings.md#settings-max_insert_block_size). If the block wasn't formed within [stream_flush_interval_ms](../../../operations/server-configuration-parameters/settings.md) milliseconds, the data will be flushed to the table regardless of the completeness of the block.
-
-To stop receiving topic data or to change the conversion logic, detach the materialized view:
-
-``` sql
-  DETACH TABLE consumer;
-  ATTACH TABLE consumer;
-```
-
-If you want to change the target table by using `ALTER`, we recommend disabling the materialized view to avoid discrepancies between the target table and the data from the view.
-
-## Configuration {#configuration}
-
-Similar to GraphiteMergeTree, the Kafka engine supports extended configuration using the ClickHouse config file. There are two configuration keys that you can use: global (`kafka`) and topic-level (`kafka_*`). The global configuration is applied first, and then the topic-level configuration is applied (if it exists).
-
-``` xml
-  <!-- Global configuration options for all tables of Kafka engine type -->
-  <kafka>
-    <debug>cgrp</debug>
-    <auto_offset_reset>smallest</auto_offset_reset>
-  </kafka>
-
-  <!-- Configuration specific to topic "logs" -->
-  <kafka_logs>
-    <retry_backoff_ms>250</retry_backoff_ms>
-    <fetch_min_bytes>100000</fetch_min_bytes>
-  </kafka_logs>
-```
-
-For a list of possible configuration options, see the [librdkafka configuration reference](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md).
Use the underscore (`_`) instead of a dot in the ClickHouse configuration. For example, `check.crcs=true` becomes `<check_crcs>true</check_crcs>`.
-
-## Virtual Columns {#virtual-columns}
-
-- `_topic` — Kafka topic.
-- `_key` — Key of the message.
-- `_offset` — Offset of the message.
-- `_timestamp` — Timestamp of the message.
-- `_partition` — Partition of the Kafka topic.
-
-**See Also**
-
-- [Virtual columns](../index.md#table_engines-virtual_columns)
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/kafka/)
diff --git a/docs/es/engines/table-engines/integrations/mysql.md b/docs/es/engines/table-engines/integrations/mysql.md
deleted file mode 100644
index 52799117255..00000000000
--- a/docs/es/engines/table-engines/integrations/mysql.md
+++ /dev/null
@@ -1,105 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 33
-toc_title: MySQL
----
-
-# MySQL {#mysql}
-
-The MySQL engine allows you to perform `SELECT` queries on data that is stored on a remote MySQL server.
-
-## Creating a Table {#creating-a-table}
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
-    ...
-) ENGINE = MySQL('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']);
-```
-
-See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query) query.
-
-The table structure can differ from the original MySQL table structure:
-
-- Column names should be the same as in the original MySQL table, but you can use just some of these columns, and in any order.
-- Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types.
-
-**Engine Parameters**
-
-- `host:port` — MySQL server address.
-
-- `database` — Remote database name.
-
-- `table` — Remote table name.
-
-- `user` — MySQL user.
-
-- `password` — User password.
-
-- `replace_query` — Flag that converts `INSERT INTO` queries to `REPLACE INTO`. If `replace_query=1`, the query is substituted.
-
-- `on_duplicate_clause` — The `ON DUPLICATE KEY on_duplicate_clause` expression that is added to the `INSERT` query.
-
-    Example: `INSERT INTO t (c1,c2) VALUES ('a', 2) ON DUPLICATE KEY UPDATE c2 = c2 + 1`, where `on_duplicate_clause` is `UPDATE c2 = c2 + 1`. See the [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/insert-on-duplicate.html) to find which `on_duplicate_clause` you can use with the `ON DUPLICATE KEY` clause.
-
-    To specify `on_duplicate_clause` you need to pass `0` to the `replace_query` parameter. If you simultaneously pass `replace_query = 1` and `on_duplicate_clause`, ClickHouse generates an exception.
-
-Simple `WHERE` clauses such as `=, !=, >, >=, <, <=` are executed on the MySQL server.
-
-The rest of the conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to MySQL finishes.
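-
-A sketch of how this split plays out (the table `mysql_table` and its `name` and `age` columns are illustrative assumptions, not part of this page):
-
-``` sql
-/* `age >= 18` is one of the simple comparisons shipped to MySQL;
-   the LIMIT is applied by ClickHouse after the MySQL query finishes. */
-SELECT name
-FROM mysql_table
-WHERE age >= 18
-LIMIT 10;
-```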
-
-## Usage Example {#usage-example}
-
-Table in MySQL:
-
-``` text
-mysql> CREATE TABLE `test`.`test` (
-    ->   `int_id` INT NOT NULL AUTO_INCREMENT,
-    ->   `int_nullable` INT NULL DEFAULT NULL,
-    ->   `float` FLOAT NOT NULL,
-    ->   `float_nullable` FLOAT NULL DEFAULT NULL,
-    ->   PRIMARY KEY (`int_id`));
-Query OK, 0 rows affected (0,09 sec)
-
-mysql> insert into test (`int_id`, `float`) VALUES (1,2);
-Query OK, 1 row affected (0,00 sec)
-
-mysql> select * from test;
-+--------+--------------+-------+----------------+
-| int_id | int_nullable | float | float_nullable |
-+--------+--------------+-------+----------------+
-|      1 |         NULL |     2 |           NULL |
-+--------+--------------+-------+----------------+
-1 row in set (0,00 sec)
-```
-
-Table in ClickHouse, retrieving data from the MySQL table created above:
-
-``` sql
-CREATE TABLE mysql_table
-(
-    `float_nullable` Nullable(Float32),
-    `int_id` Int32
-)
-ENGINE = MySQL('localhost:3306', 'test', 'test', 'bayonet', '123')
-```
-
-``` sql
-SELECT * FROM mysql_table
-```
-
-``` text
-┌─float_nullable─┬─int_id─┐
-│           ᴺᵁᴸᴸ │      1 │
-└────────────────┴────────┘
-```
-
-## See Also {#see-also}
-
-- [The ‘mysql’ table function](../../../sql-reference/table-functions/mysql.md)
-- [Using MySQL as a source of external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-mysql)
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/mysql/)
diff --git a/docs/es/engines/table-engines/integrations/odbc.md b/docs/es/engines/table-engines/integrations/odbc.md
deleted file mode 100644
index 75c79484d61..00000000000
--- a/docs/es/engines/table-engines/integrations/odbc.md
+++ /dev/null
@@ -1,132 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 35
-toc_title: ODBC
----
-
-# ODBC {#table-engine-odbc}
-
-Allows ClickHouse to connect to external databases via [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity).
-
-To safely implement ODBC connections, ClickHouse uses a separate program `clickhouse-odbc-bridge`. If the ODBC driver is loaded directly from `clickhouse-server`, driver problems can crash the ClickHouse server. ClickHouse automatically starts `clickhouse-odbc-bridge` when it is required. The ODBC bridge program is installed from the same package as `clickhouse-server`.
-
-This engine supports the [Nullable](../../../sql-reference/data-types/nullable.md) data type.
-
-## Creating a Table {#creating-a-table}
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1],
-    name2 [type2],
-    ...
-)
-ENGINE = ODBC(connection_settings, external_database, external_table)
-```
-
-See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query) query.
-
-The table structure can differ from the source table structure:
-
-- Column names should be the same as in the source table, but you can use just some of these columns and in any order.
-- Column types may differ from those in the source table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types.
-
-**Engine Parameters**
-
-- `connection_settings` — Name of the section with connection settings in the `odbc.ini` file.
-- `external_database` — Name of a database in an external DBMS.
-- `external_table` — Name of a table in the `external_database`.
-
-## Usage Example {#usage-example}
-
-**Retrieving data from the local MySQL installation via ODBC**
-
-This example is checked for Ubuntu Linux 18.04 and MySQL server 5.7.
-
-Ensure that unixODBC and MySQL Connector are installed.
-
-By default (if installed from packages), ClickHouse starts as the user `clickhouse`. Thus, you need to create and configure this user on the MySQL server.
-
-``` bash
-$ sudo mysql
-```
-
-``` sql
-mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse';
-mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'localhost' WITH GRANT OPTION;
-```
-
-Then configure the connection in `/etc/odbc.ini`.
-
-``` bash
-$ cat /etc/odbc.ini
-[mysqlconn]
-DRIVER = /usr/local/lib/libmyodbc5w.so
-SERVER = 127.0.0.1
-PORT = 3306
-DATABASE = test
-USERNAME = clickhouse
-PASSWORD = clickhouse
-```
-
-You can check the connection using the `isql` utility from the unixODBC installation.
-
-``` bash
-$ isql -v mysqlconn
-+-------------------------+
-| Connected!              |
-|                         |
-...
-```
-
-Table in MySQL:
-
-``` text
-mysql> CREATE TABLE `test`.`test` (
-    ->   `int_id` INT NOT NULL AUTO_INCREMENT,
-    ->   `int_nullable` INT NULL DEFAULT NULL,
-    ->   `float` FLOAT NOT NULL,
-    ->   `float_nullable` FLOAT NULL DEFAULT NULL,
-    ->   PRIMARY KEY (`int_id`));
-Query OK, 0 rows affected (0,09 sec)
-
-mysql> insert into test (`int_id`, `float`) VALUES (1,2);
-Query OK, 1 row affected (0,00 sec)
-
-mysql> select * from test;
-+--------+--------------+-------+----------------+
-| int_id | int_nullable | float | float_nullable |
-+--------+--------------+-------+----------------+
-|      1 |         NULL |     2 |           NULL |
-+--------+--------------+-------+----------------+
-1 row in set (0,00 sec)
-```
-
-Table in ClickHouse, retrieving data from the MySQL table:
-
-``` sql
-CREATE TABLE odbc_t
-(
-    `int_id` Int32,
-    `float_nullable` Nullable(Float32)
-)
-ENGINE = ODBC('DSN=mysqlconn', 'test', 'test')
-```
-
-``` sql
-SELECT * FROM odbc_t
-```
-
-``` text
-┌─int_id─┬─float_nullable─┐
-│      1 │           ᴺᵁᴸᴸ │
-└────────┴────────────────┘
-```
-
-## See Also {#see-also}
-
-- [ODBC external dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-odbc)
-- [ODBC table function](../../../sql-reference/table-functions/odbc.md)
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/odbc/)
diff --git a/docs/es/engines/table-engines/log-family/index.md b/docs/es/engines/table-engines/log-family/index.md
deleted file mode 100644
index a7a3016f967..00000000000
--- a/docs/es/engines/table-engines/log-family/index.md
+++ /dev/null
@@ -1,47 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_folder_title: Log Family
-toc_priority: 29
-toc_title: Introduction
----
-
-# Log Engine Family {#log-engine-family}
-
-These engines were developed for scenarios when you need to quickly write many small tables (up to about 1 million rows) and read them later as a whole.
-
-Engines of the family:
-
-- [StripeLog](stripelog.md)
-- [Log](log.md)
-- [TinyLog](tinylog.md)
-
-## Common Properties {#common-properties}
-
-Engines:
-
-- Store data on a disk.
-
-- Append data to the end of the file when writing.
-
-- Support locks for concurrent data access.
-
-    During `INSERT` queries, the table is locked, and other queries for reading and writing data both wait for the table to unlock. If there are no data-writing queries, any number of data-reading queries can be performed concurrently.
-
-- Do not support [mutation](../../../sql-reference/statements/alter.md#alter-mutations) operations.
-
-- Do not support indexes.
-
-    This means that `SELECT` queries for ranges of data are not efficient.
-
-- Do not write data atomically.
-
-    You can get a table with corrupted data if something breaks the write operation, for example, an abnormal server shutdown.
-
-## Differences {#differences}
-
-The `TinyLog` engine is the simplest in the family and provides the poorest functionality and lowest efficiency. The `TinyLog` engine does not support parallel data reading by several threads. It reads data slower than the other engines in the family that support parallel reading, and it uses almost as many descriptors as the `Log` engine because it stores each column in a separate file. Use it in simple low-load scenarios.
-
-The `Log` and `StripeLog` engines support parallel data reading. When reading data, ClickHouse uses multiple threads. Each thread processes a separate data block. The `Log` engine uses a separate file for each column of the table. `StripeLog` stores all the data in one file. As a result, the `StripeLog` engine uses fewer descriptors in the operating system, but the `Log` engine provides higher efficiency when reading data.
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/log_family/)
diff --git a/docs/es/engines/table-engines/log-family/log.md b/docs/es/engines/table-engines/log-family/log.md
deleted file mode 100644
index 1db374390e4..00000000000
--- a/docs/es/engines/table-engines/log-family/log.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 33
-toc_title: Log
----
-
-# Log {#log}
-
-The engine belongs to the family of log engines. See the common properties of log engines and their differences in the [Log Engine Family](index.md) article.
-
-Log differs from [TinyLog](tinylog.md) in that a small file of “marks” resides with the column files. These marks are written on every data block and contain offsets that indicate where to start reading the file in order to skip the specified number of rows. This makes it possible to read table data in multiple threads.
-For concurrent data access, the read operations can be performed simultaneously, while write operations block reads and each other.
-The Log engine does not support indexes. Similarly, if writing to a table failed, the table is broken, and reading from it returns an error. The Log engine is appropriate for temporary data, write-once tables, and for testing or demonstration purposes.
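-
-A minimal sketch of the write-once pattern the engine suits (the table name and data are illustrative only):
-
-``` sql
-CREATE TABLE log_example (id UInt64, message String) ENGINE = Log;
-
-INSERT INTO log_example VALUES (1, 'first'), (2, 'second');
-
-SELECT * FROM log_example ORDER BY id;
-```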
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/log/)
diff --git a/docs/es/engines/table-engines/log-family/stripelog.md b/docs/es/engines/table-engines/log-family/stripelog.md
deleted file mode 100644
index 0965e9a987c..00000000000
--- a/docs/es/engines/table-engines/log-family/stripelog.md
+++ /dev/null
@@ -1,95 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 32
-toc_title: StripeLog
----
-
-# StripeLog {#stripelog}
-
-This engine belongs to the family of log engines. See the common properties of log engines and their differences in the [Log Engine Family](index.md) article.
-
-Use this engine in scenarios when you need to write many tables with a small amount of data (less than 1 million rows).
-
-## Creating a Table {#table_engines-stripelog-creating-a-table}
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    column1_name [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    column2_name [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-) ENGINE = StripeLog
-```
-
-See the detailed description of the [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query) query.
-
-## Writing the Data {#table_engines-stripelog-writing-the-data}
-
-The `StripeLog` engine stores all the columns in one file. For each `INSERT` query, ClickHouse appends the data block to the end of a table file, writing columns one by one.
-
-For each table, ClickHouse writes the files:
-
-- `data.bin` — Data file.
-- `index.mrk` — File with marks. Marks contain offsets for each column of each data block inserted.
-
-The `StripeLog` engine does not support the `ALTER UPDATE` and `ALTER DELETE` operations.
-
-## Reading the Data {#table_engines-stripelog-reading-the-data}
-
-The file with marks allows ClickHouse to parallelize the reading of data. This means that a `SELECT` query returns rows in an unpredictable order. Use the `ORDER BY` clause to sort rows.
-
-## Example of Use {#table_engines-stripelog-example-of-use}
-
-Creating a table:
-
-``` sql
-CREATE TABLE stripe_log_table
-(
-    timestamp DateTime,
-    message_type String,
-    message String
-)
-ENGINE = StripeLog
-```
-
-Inserting data:
-
-``` sql
-INSERT INTO stripe_log_table VALUES (now(),'REGULAR','The first regular message')
-INSERT INTO stripe_log_table VALUES (now(),'REGULAR','The second regular message'),(now(),'WARNING','The first warning message')
-```
-
-We used two `INSERT` queries to create two data blocks inside the `data.bin` file.
-
-ClickHouse uses multiple threads when selecting data. Each thread reads a separate data block and returns resulting rows independently as it finishes. As a result, the order of blocks of rows in the output does not match the order of the same blocks in the input in most cases.
For example:
-
-``` sql
-SELECT * FROM stripe_log_table
-```
-
-``` text
-┌───────────timestamp─┬─message_type─┬─message────────────────────┐
-│ 2019-01-18 14:27:32 │ REGULAR      │ The second regular message │
-│ 2019-01-18 14:34:53 │ WARNING      │ The first warning message  │
-└─────────────────────┴──────────────┴────────────────────────────┘
-┌───────────timestamp─┬─message_type─┬─message───────────────────┐
-│ 2019-01-18 14:23:43 │ REGULAR      │ The first regular message │
-└─────────────────────┴──────────────┴───────────────────────────┘
-```
-
-Sorting the results (ascending order by default):
-
-``` sql
-SELECT * FROM stripe_log_table ORDER BY timestamp
-```
-
-``` text
-┌───────────timestamp─┬─message_type─┬─message────────────────────┐
-│ 2019-01-18 14:23:43 │ REGULAR      │ The first regular message  │
-│ 2019-01-18 14:27:32 │ REGULAR      │ The second regular message │
-│ 2019-01-18 14:34:53 │ WARNING      │ The first warning message  │
-└─────────────────────┴──────────────┴────────────────────────────┘
-```
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/stripelog/)
diff --git a/docs/es/engines/table-engines/log-family/tinylog.md b/docs/es/engines/table-engines/log-family/tinylog.md
deleted file mode 100644
index a2cbf7257b6..00000000000
--- a/docs/es/engines/table-engines/log-family/tinylog.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 34
-toc_title: TinyLog
----
-
-# TinyLog {#tinylog}
-
-The engine belongs to the log engine family. See [Log Engine Family](index.md) for the common properties of log engines and their differences.
-
-This table engine is typically used with the write-once method: write data one time, then read it as many times as necessary. For example, you can use `TinyLog`-type tables for intermediary data that is processed in small batches. Note that storing data in a large number of small tables is inefficient.
-
-Queries are executed in a single stream. In other words, this engine is intended for relatively small tables (up to about 1,000,000 rows). It makes sense to use this table engine if you have many small tables, since it is simpler than the [Log](log.md) engine (fewer files need to be opened).
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/tinylog/)
diff --git a/docs/es/engines/table-engines/mergetree-family/aggregatingmergetree.md b/docs/es/engines/table-engines/mergetree-family/aggregatingmergetree.md
deleted file mode 100644
index 2aedfbd2317..00000000000
--- a/docs/es/engines/table-engines/mergetree-family/aggregatingmergetree.md
+++ /dev/null
@@ -1,105 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 35
-toc_title: AggregatingMergeTree
----
-
-# AggregatingMergeTree {#aggregatingmergetree}
-
-The engine inherits from [MergeTree](mergetree.md#table_engines-mergetree), altering the logic for data parts merging. ClickHouse replaces all rows with the same primary key (or, more accurately, with the same [sorting key](mergetree.md)) with a single row (within one data part) that stores a combination of states of aggregate functions.
-
-You can use `AggregatingMergeTree` tables for incremental data aggregation, including for aggregated materialized views.
-
-The engine processes all columns with the following types:
-
-- [AggregateFunction](../../../sql-reference/data-types/aggregatefunction.md)
-- [SimpleAggregateFunction](../../../sql-reference/data-types/simpleaggregatefunction.md)
-
-It is appropriate to use `AggregatingMergeTree` if it reduces the number of rows by orders of magnitude.
-
-## Creating a Table {#creating-a-table}
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-) ENGINE = AggregatingMergeTree()
-[PARTITION BY expr]
-[ORDER BY expr]
-[SAMPLE BY expr]
-[TTL expr]
-[SETTINGS name=value, ...]
-```
-
-For a description of request parameters, see the [request description](../../../sql-reference/statements/create.md).
-
-**Query clauses**
-
-When creating an `AggregatingMergeTree` table, the same [clauses](mergetree.md) are required as when creating a `MergeTree` table.
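-
-A concrete sketch of such a table (the table `agg_visits` and the source table `visits(StartDate Date, UserID UInt64)` are hypothetical): states are written with `-State` functions and read back with `-Merge`, as described in the SELECT and INSERT section below:
-
-``` sql
-CREATE TABLE agg_visits
-(
-    StartDate Date,
-    Users AggregateFunction(uniq, UInt64)
-)
-ENGINE = AggregatingMergeTree()
-ORDER BY StartDate;
-
-INSERT INTO agg_visits
-SELECT StartDate, uniqState(UserID)
-FROM visits
-GROUP BY StartDate;
-
-SELECT StartDate, uniqMerge(Users) AS Users
-FROM agg_visits
-GROUP BY StartDate;
-```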
-
-<details markdown="1">
-
-<summary>Deprecated Method for Creating a Table</summary>
-
-!!! attention "Attention"
-    Do not use this method in new projects and, if possible, switch old projects to the method described above.
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-) ENGINE [=] AggregatingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity)
-```
-
-All of the parameters have the same meaning as in `MergeTree`.
-</details>
-
-## SELECT and INSERT {#select-and-insert}
-
-To insert data, use an [INSERT SELECT](../../../sql-reference/statements/insert-into.md) query with aggregate -State- functions.
-When selecting data from an `AggregatingMergeTree` table, use a `GROUP BY` clause and the same aggregate functions as when inserting data, but using the `-Merge` suffix.
-
-In the results of a `SELECT` query, values of the `AggregateFunction` type have implementation-specific binary representation for all of the ClickHouse output formats. If you dump data into, for example, the `TabSeparated` format with a `SELECT` query, then this dump can be loaded back using an `INSERT` query.
-
-## Example of an Aggregated Materialized View {#example-of-an-aggregated-materialized-view}
-
-An `AggregatingMergeTree` materialized view that watches the `test.visits` table:
-
-``` sql
-CREATE MATERIALIZED VIEW test.basic
-ENGINE = AggregatingMergeTree() PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate)
-AS SELECT
-    CounterID,
-    StartDate,
-    sumState(Sign)    AS Visits,
-    uniqState(UserID) AS Users
-FROM test.visits
-GROUP BY CounterID, StartDate;
-```
-
-Inserting data into the `test.visits` table:
-
-``` sql
-INSERT INTO test.visits ...
-```
-
-The data is inserted both into the table and into the view `test.basic`, which performs the aggregation.
-
-To get the aggregated data, we need to execute a query such as `SELECT ... GROUP BY ...` from the view `test.basic`:
-
-``` sql
-SELECT
-    StartDate,
-    sumMerge(Visits) AS Visits,
-    uniqMerge(Users) AS Users
-FROM test.basic
-GROUP BY StartDate
-ORDER BY StartDate;
-```
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/aggregatingmergetree/)
diff --git a/docs/es/engines/table-engines/mergetree-family/collapsingmergetree.md b/docs/es/engines/table-engines/mergetree-family/collapsingmergetree.md
deleted file mode 100644
index 027d5c2adf7..00000000000
--- a/docs/es/engines/table-engines/mergetree-family/collapsingmergetree.md
+++ /dev/null
@@ -1,306 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 36
-toc_title: CollapsingMergeTree
----
-
-# CollapsingMergeTree {#table_engine-collapsingmergetree}
-
-The engine inherits from [MergeTree](mergetree.md) and adds the logic of collapsing rows to the data parts merge algorithm.
-
-`CollapsingMergeTree` asynchronously deletes (collapses) pairs of rows if all of the fields in a sorting key (`ORDER BY`) are equivalent except for the particular field `Sign`, which can have the values `1` and `-1`. Rows without a pair are kept. For more details, see the [Collapsing](#table_engine-collapsingmergetree-collapsing) section of the document.
-
-The engine may significantly reduce the volume of storage and increase the efficiency of `SELECT` queries as a consequence.
-
-## Creating a Table {#creating-a-table}
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-) ENGINE = CollapsingMergeTree(sign)
-[PARTITION BY expr]
-[ORDER BY expr]
-[SAMPLE BY expr]
-[SETTINGS name=value, ...]
-```
-
-For a description of query parameters, see the [query description](../../../sql-reference/statements/create.md).
-
-**CollapsingMergeTree parameters**
-
-- `sign` — Name of the column with the type of row: `1` is a “state” row, `-1` is a “cancel” row.
-
-    Column data type — `Int8`.
-
-**Query clauses**
-
-When creating a `CollapsingMergeTree` table, the same [query clauses](mergetree.md#table_engine-mergetree-creating-a-table) are required as when creating a `MergeTree` table.
-
-<details markdown="1">
-
-<summary>Deprecated method for creating a table</summary>
-
-!!! attention "Attention"
-    Do not use this method in new projects and, if possible, switch old projects to the method described above.
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-) ENGINE [=] CollapsingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, sign)
-```
-
-All of the parameters except `sign` have the same meaning as in `MergeTree`.
-
-- `sign` — Name of the column with the type of row: `1` — “state” row, `-1` — “cancel” row.
-
-    Column data type — `Int8`.
-
-</details>
-
-## Collapsing {#table_engine-collapsingmergetree-collapsing}
-
-### Data {#data}
-
-Consider the situation where you need to save continually changing data for some object. It sounds logical to have one row per object and update it on any change, but the update operation is expensive and slow for a DBMS because it requires rewriting the data in storage. If you need to write data quickly, an update is not acceptable, but you can write the changes of an object sequentially as follows.
-
-Use the particular column `Sign`. If `Sign = 1` it means that the row is a state of an object; let's call it the “state” row. If `Sign = -1` it means the cancellation of the state of an object with the same attributes; let's call it the “cancel” row.
-
-For example, we want to calculate how many pages users checked on some site and how long they were there. At some moment we write the following row with the state of user activity:
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
-│ 4324182021466249494 │         5 │      146 │    1 │
-└─────────────────────┴───────────┴──────────┴──────┘
-```
-
-At some moment later we register the change of user activity and write it with the following two rows.
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
-│ 4324182021466249494 │         5 │      146 │   -1 │
-│ 4324182021466249494 │         6 │      185 │    1 │
-└─────────────────────┴───────────┴──────────┴──────┘
-```
-
-The first row cancels the previous state of the object (user). It should copy the sorting key fields of the canceled state except for `Sign`.
-
-The second row contains the current state.
-
-As we need only the last state of user activity, the rows
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
-│ 4324182021466249494 │         5 │      146 │    1 │
-│ 4324182021466249494 │         5 │      146 │   -1 │
-└─────────────────────┴───────────┴──────────┴──────┘
-```
-
-can be deleted, collapsing the invalid (old) state of the object. `CollapsingMergeTree` does this while merging the data parts.
-
-Why we need 2 rows for each change is covered in the [Algorithm](#table_engine-collapsingmergetree-collapsing-algorithm) section.
-
-**Peculiar properties of such an approach**
-
-1. The program that writes the data should remember the state of an object to be able to cancel it. The “cancel” row should contain copies of the sorting key fields of the “state” row and the opposite `Sign`. This increases the initial size of storage but allows writing the data quickly.
-2. Long growing arrays in columns reduce the efficiency of the engine due to the load for writing. The more straightforward the data, the higher the efficiency.
-3. The `SELECT` results depend strongly on the consistency of the object change history. Be accurate when preparing data for inserting. You can get unpredictable results with inconsistent data, for example, negative values for non-negative metrics such as session depth.
-
-### Algorithm {#table_engine-collapsingmergetree-collapsing-algorithm}
-
-When ClickHouse merges data parts, each group of consecutive rows with the same sorting key (`ORDER BY`) is reduced to not more than two rows, one with `Sign = 1` (the “state” row) and another with `Sign = -1` (the “cancel” row). In other words, the entries collapse.
-
-For each resulting data part ClickHouse saves:
-
-1. The first “cancel” and the last “state” rows, if the number of “state” and “cancel” rows matches and the last row is a “state” row.
-2. The last “state” row, if there are more “state” rows than “cancel” rows.
-3. The first “cancel” row, if there are more “cancel” rows than “state” rows.
-4. None of the rows, in all other cases.
-
-Also, when there are at least 2 more “state” rows than “cancel” rows, or at least 2 more “cancel” rows than “state” rows, the merge continues, but ClickHouse treats this situation as a logical error and records it in the server log. This error can occur if the same data were inserted more than once.
-
-Thus, collapsing should not change the results of calculating statistics.
-Changes are gradually collapsed so that in the end only the last state of almost every object is left.
-
-The `Sign` column is required because the merging algorithm doesn't guarantee that all of the rows with the same sorting key will be in the same resulting data part, or even on the same physical server. ClickHouse processes `SELECT` queries with multiple threads and cannot predict the order of rows in the result. Aggregation is required if there is a need to get completely “collapsed” data from a `CollapsingMergeTree` table.
-
-To finalize collapsing, write a query with a `GROUP BY` clause and aggregate functions that account for the sign. For example, to calculate quantity, use `sum(Sign)` instead of `count()`. To calculate the sum of something, use `sum(Sign * x)` instead of `sum(x)`, and so on, and also add `HAVING sum(Sign) > 0`.
-
-The aggregates `count`, `sum` and `avg` can be calculated this way. The aggregate `uniq` can be calculated if an object has at least one non-collapsed state. The aggregates `min` and `max` cannot be calculated, because `CollapsingMergeTree` does not save the value history of the collapsed states.
-
-If you need to extract data without aggregation (for example, to check whether rows are present whose newest values match certain conditions), you can use the `FINAL` modifier for the `FROM` clause. This approach is significantly less efficient.
-
-## Example of Use {#example-of-use}
-
-Example data:
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
-│ 4324182021466249494 │         5 │      146 │    1 │
-│ 4324182021466249494 │         5 │      146 │   -1 │
-│ 4324182021466249494 │         6 │      185 │    1 │
-└─────────────────────┴───────────┴──────────┴──────┘
-```
-
-Creation of the table:
-
-``` sql
-CREATE TABLE UAct
-(
-    UserID UInt64,
-    PageViews UInt8,
-    Duration UInt8,
-    Sign Int8
-)
-ENGINE = CollapsingMergeTree(Sign)
-ORDER BY UserID
-```
-
-Insertion of the data:
-
-``` sql
-INSERT INTO UAct VALUES (4324182021466249494, 5, 146, 1)
-```
-
-``` sql
-INSERT INTO UAct VALUES (4324182021466249494, 5, 146, -1),(4324182021466249494, 6, 185, 1)
-```
-
-We use two `INSERT` queries to create two different data parts. If we insert the data with a single query, ClickHouse creates one data part and will never perform any merge.
-
-Getting the data:
-
-``` sql
-SELECT * FROM UAct
-```
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
-│ 4324182021466249494 │         5 │      146 │   -1 │
-│ 4324182021466249494 │         6 │      185 │    1 │
-└─────────────────────┴───────────┴──────────┴──────┘
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
-│ 4324182021466249494 │         5 │      146 │    1 │
-└─────────────────────┴───────────┴──────────┴──────┘
-```
-
-What do we see, and where is the collapsing?
-
-With two `INSERT` queries, we created 2 data parts. The `SELECT` query was performed in 2 threads, and we got a random order of rows. Collapsing did not occur because the data parts have not been merged yet. ClickHouse merges data parts at an unknown moment which we cannot predict.
-
-Thus we need aggregation:
-
-``` sql
-SELECT
-    UserID,
-    sum(PageViews * Sign) AS PageViews,
-    sum(Duration * Sign) AS Duration
-FROM UAct
-GROUP BY UserID
-HAVING sum(Sign) > 0
-```
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┐
-│ 4324182021466249494 │         6 │      185 │
-└─────────────────────┴───────────┴──────────┘
-```
-
-If we do not need aggregation and want to force collapsing, we can use the `FINAL` modifier for the `FROM` clause.
-
-``` sql
-SELECT * FROM UAct FINAL
-```
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
-│ 4324182021466249494 │         6 │      185 │    1 │
-└─────────────────────┴───────────┴──────────┴──────┘
-```
-
-This way of selecting the data is very inefficient. Don't use it for big tables.
-
-## Example of Another Approach {#example-of-another-approach}
-
-Example data:
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
-│ 4324182021466249494 │         5 │      146 │    1 │
-│ 4324182021466249494 │        -5 │     -146 │   -1 │
-│ 4324182021466249494 │         6 │      185 │    1 │
-└─────────────────────┴───────────┴──────────┴──────┘
-```
-
-The idea is that merges take into account only the key fields, and in the “cancel” row we can specify negative values that equalize the previous version of the row when summing without using the `Sign` column. For this approach, it is necessary to change the data type of `PageViews` and `Duration` to store negative values: UInt8 -> Int16.
-
-``` sql
-CREATE TABLE UAct
-(
-    UserID UInt64,
-    PageViews Int16,
-    Duration Int16,
-    Sign Int8
-)
-ENGINE = CollapsingMergeTree(Sign)
-ORDER BY UserID
-```
-
-Let's test the approach:
-
-``` sql
-insert into UAct values(4324182021466249494, 5, 146, 1);
-insert into UAct values(4324182021466249494, -5, -146, -1);
-insert into UAct values(4324182021466249494, 6, 185, 1);
-
-select * from UAct final; -- avoid using FINAL in production (just for a test or small tables)
-```
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
-│ 4324182021466249494 │         6 │      185 │    1 │
-└─────────────────────┴───────────┴──────────┴──────┘
-```
-
-``` sql
-SELECT
-    UserID,
-    sum(PageViews) AS PageViews,
-    sum(Duration) AS Duration
-FROM UAct
-GROUP BY UserID
-```
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┐
-│ 4324182021466249494 │         6 │      185 │
-└─────────────────────┴───────────┴──────────┘
-```
-
-``` sql
-select count() FROM UAct
-```
-
-``` text
-┌─count()─┐
-│       3 │
-└─────────┘
-```
-
-``` sql
-optimize table UAct final;
-
-select * FROM UAct
-```
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
-│ 4324182021466249494 │         6 │      185 │    1 │
-└─────────────────────┴───────────┴──────────┴──────┘
-```
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/collapsingmergetree/)
diff --git a/docs/es/engines/table-engines/mergetree-family/custom-partitioning-key.md b/docs/es/engines/table-engines/mergetree-family/custom-partitioning-key.md
deleted file mode 100644
index 6cbc0a9192e..00000000000
--- a/docs/es/engines/table-engines/mergetree-family/custom-partitioning-key.md
+++ /dev/null
@@ -1,127 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 32
-toc_title: Custom Partitioning Key
----
-
-# Custom Partitioning Key {#custom-partitioning-key}
-
-Partitioning is available for the [MergeTree](mergetree.md) family tables (including [replicated](replication.md) tables). [Materialized views](../special/materializedview.md#materializedview) based on MergeTree tables support partitioning as well.
-
-A partition is a logical combination of records in a table by a specified criterion. You can set a partition by an arbitrary criterion, such as by month, by day, or by event type. Each partition is stored separately to simplify manipulations of this data. When accessing the data, ClickHouse uses the smallest possible subset of partitions.
-
-The partition is specified in the `PARTITION BY expr` clause when [creating a table](mergetree.md#table_engine-mergetree-creating-a-table). The partition key can be any expression from the table columns. For example, to specify partitioning by month, use the expression `toYYYYMM(date_column)`:
-
-``` sql
-CREATE TABLE visits
-(
-    VisitDate Date,
-    Hour UInt8,
-    ClientID UUID
-)
-ENGINE = MergeTree()
-PARTITION BY toYYYYMM(VisitDate)
-ORDER BY Hour;
-```
-
-The partition key can also be a tuple of expressions (similar to the [primary key](mergetree.md#primary-keys-and-indexes-in-queries)). For example:
-
-``` sql
-ENGINE = ReplicatedCollapsingMergeTree('/clickhouse/tables/name', 'replica1', Sign)
-PARTITION BY (toMonday(StartDate), EventType)
-ORDER BY (CounterID, StartDate, intHash32(UserID));
-```
-
-In this example, we set partitioning by the event types that occurred during the current week.
-
-When inserting new data into a table, this data is stored as a separate part (chunk) sorted by the primary key. In 10-15 minutes after inserting, the parts of the same partition are merged into the entire part.
-
-!!! info "INFO"
-    A merge only works for data parts that have the same value for the partitioning expression. This means **you shouldn't make overly granular partitions** (more than about a thousand partitions). Otherwise, the `SELECT` query performs poorly because of an excessively large number of files in the file system and open file descriptors.
-
-Use the [system.parts](../../../operations/system-tables.md#system_tables-parts) table to view the parts and partitions of a table. For example, let's assume that we have a `visits` table partitioned by month. Let's perform the `SELECT` query on the `system.parts` table:
-
-``` sql
-SELECT
-    partition,
-    name,
-    active
-FROM system.parts
-WHERE table = 'visits'
-```
-
-``` text
-┌─partition─┬─name───────────┬─active─┐
-│ 201901    │ 201901_1_3_1   │      0 │
-│ 201901    │ 201901_1_9_2   │      1 │
-│ 201901    │ 201901_8_8_0   │      0 │
-│ 201901    │ 201901_9_9_0   │      0 │
-│ 201902    │ 201902_4_6_1   │      1 │
-│ 201902    │ 201902_10_10_0 │      1 │
-│ 201902    │ 201902_11_11_0 │      1 │
-└───────────┴────────────────┴────────┘
-```
-
-The `partition` column contains the names of the partitions. There are two partitions in this example: `201901` and `201902`. You can use this column value to specify the partition name in the [ALTER … PARTITION](#alter_manipulations-with-partitions) query.
-
-The `name` column contains the names of the partition data parts.
-You can use this column to specify the name of the part in the [ALTER ATTACH PART](#alter_attach-partition) query.
-
-Let's break down the name of the first part: `201901_1_3_1`:
-
-- `201901` is the partition name.
-- `1` is the minimum number of the data block.
-- `3` is the maximum number of the data block.
-- `1` is the chunk level (the depth of the merge tree it is formed from).
-
-!!! info "INFO"
-    The parts of old-type tables have the name: `20190117_20190123_2_2_0` (minimum date - maximum date - minimum block number - maximum block number - level).
-
-The `active` column shows the status of the part. `1` is active; `0` is inactive. The inactive parts are, for example, the source parts remaining after merging into a larger part. The corrupted data parts are also indicated as inactive.
-
-As you can see in the example, there are several separated parts of the same partition (for example, `201901_1_3_1` and `201901_1_9_2`). This means that these parts are not merged yet. ClickHouse merges the inserted parts of data periodically, approximately 15 minutes after inserting. In addition, you can perform a non-scheduled merge using the [OPTIMIZE](../../../sql-reference/statements/misc.md#misc_operations-optimize) query. Example:
-
-``` sql
-OPTIMIZE TABLE visits PARTITION 201902;
-```
-
-``` text
-┌─partition─┬─name───────────┬─active─┐
-│ 201901    │ 201901_1_3_1   │      0 │
-│ 201901    │ 201901_1_9_2   │      1 │
-│ 201901    │ 201901_8_8_0   │      0 │
-│ 201901    │ 201901_9_9_0   │      0 │
-│ 201902    │ 201902_4_6_1   │      0 │
-│ 201902    │ 201902_4_11_2  │      1 │
-│ 201902    │ 201902_10_10_0 │      0 │
-│ 201902    │ 201902_11_11_0 │      0 │
-└───────────┴────────────────┴────────┘
-```
-
-Inactive parts will be deleted approximately 10 minutes after merging.
-
-Another way to view a set of parts and partitions is to go into the directory of the table: `/var/lib/clickhouse/data/<database>/<table>/`. For example:
-
-``` bash
-/var/lib/clickhouse/data/default/visits$ ls -l
-total 40
-drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  1 16:48 201901_1_3_1
-drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 16:17 201901_1_9_2
-drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 15:52 201901_8_8_0
-drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 15:52 201901_9_9_0
-drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 16:17 201902_10_10_0
-drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 16:17 201902_11_11_0
-drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 16:19 201902_4_11_2
-drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 12:09 201902_4_6_1
-drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  1 16:48 detached
-```
-
-The folders ‘201901_1_1_0’, ‘201901_1_7_1’ and so on are the directories of the parts. Each part relates to a corresponding partition and contains data for a certain month only (the table in this example is partitioned by month).
-
-The `detached` directory contains parts that were detached from the table using the [DETACH](../../../sql-reference/statements/alter.md#alter_detach-partition) query. The corrupted parts are also moved to this directory, instead of being deleted. The server does not use the parts from the `detached` directory. You can add, delete, or modify the data in this directory at any time – the server will not know about this until you run the [ATTACH](../../../sql-reference/statements/alter.md#alter_attach-partition) query.
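-
-A short sketch of that workflow on the `visits` table from the example above (partition ID taken from the sample output; run this only on a table you can afford to take partly offline):
-
-``` sql
--- Move all parts of the partition to the detached directory.
-ALTER TABLE visits DETACH PARTITION 201901;
-
--- After inspecting or fixing the files on disk, make the data visible again.
-ALTER TABLE visits ATTACH PARTITION 201901;
-```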
-
-Note that on an operating server, you cannot manually change the set of parts or their data on the file system, since the server will not know about it. For non-replicated tables, you can do this when the server is stopped, but it is not recommended. For replicated tables, the set of parts cannot be changed in any case.
-
-ClickHouse allows you to perform operations with the partitions: delete them, copy from one table to another, or create a backup. See the list of all operations in the section [Manipulations with Partitions and Parts](../../../sql-reference/statements/alter.md#alter_manipulations-with-partitions).
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/custom_partitioning_key/)
diff --git a/docs/es/engines/table-engines/mergetree-family/graphitemergetree.md b/docs/es/engines/table-engines/mergetree-family/graphitemergetree.md
deleted file mode 100644
index d33ddcebac2..00000000000
--- a/docs/es/engines/table-engines/mergetree-family/graphitemergetree.md
+++ /dev/null
@@ -1,174 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 38
-toc_title: GraphiteMergeTree
----
-
-# GraphiteMergeTree {#graphitemergetree}
-
-This engine is designed for thinning and aggregating/averaging (rollup) [Graphite](http://graphite.readthedocs.io/en/latest/index.html) data. It may be helpful to developers who want to use ClickHouse as a data store for Graphite.
-
-You can use any ClickHouse table engine to store the Graphite data if you don't need rollup, but if you need rollup, use `GraphiteMergeTree`. The engine reduces the volume of storage and increases the efficiency of queries from Graphite.
-
-The engine inherits properties from [MergeTree](mergetree.md).
-
-## Creating a Table {#creating-table}
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    Path String,
-    Time DateTime,
-    Value <Numeric_type>,
-    Version <Numeric_type>
-    ...
-) ENGINE = GraphiteMergeTree(config_section)
-[PARTITION BY expr]
-[ORDER BY expr]
-[SAMPLE BY expr]
-[SETTINGS name=value, ...]
-```
-
-See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query) query.
-
-A table for Graphite data should have the following columns for the following data:
-
-- Metric name (Graphite sensor). Data type: `String`.
-
-- Time of measuring the metric. Data type: `DateTime`.
-
-- Value of the metric. Data type: any numeric.
-
-- Version of the metric. Data type: any numeric.
-
-    ClickHouse saves the rows with the highest version or the last written if the versions are the same. Other rows are deleted during the merge of data parts.
-
-The names of these columns should be set in the rollup configuration.
-
-**GraphiteMergeTree parameters**
-
-- `config_section` — Name of the section in the configuration file, where are the rules of rollup set.
-
-**Query clauses**
-
-When creating a `GraphiteMergeTree` table, the same [clauses](mergetree.md#table_engine-mergetree-creating-a-table) are required as when creating a `MergeTree` table.
-
-<details markdown="1">
-
-<summary>Deprecated method for creating a table</summary>
-
-!!! attention "Attention"
-    Do not use this method in new projects and, if possible, switch old projects to the method described above.
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    EventDate Date,
-    Path String,
-    Time DateTime,
-    Value <Numeric_type>,
-    Version <Numeric_type>
-    ...
-) ENGINE [=] GraphiteMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, config_section)
-```
-
-All of the parameters except `config_section` have the same meaning as in `MergeTree`.
-
-- `config_section` — Name of the section in the configuration file, where are the rules of rollup set.
-
-</details>
-
-## Rollup Configuration {#rollup-configuration}
-
-The settings for rollup are defined by the [graphite_rollup](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-graphite) parameter in the server configuration. The name of the parameter could be any. You can create several configurations and use them for different tables.
-
-Rollup configuration structure:
-
-      required-columns
-      patterns
-
-### Required Columns {#required-columns}
-
-- `path_column_name` — The name of the column storing the metric name (Graphite sensor). Default value: `Path`.
-- `time_column_name` — The name of the column storing the time of measuring the metric. Default value: `Time`.
-- `value_column_name` — The name of the column storing the value of the metric at the time set in `time_column_name`. Default value: `Value`.
-- `version_column_name` — The name of the column storing the version of the metric. Default value: `Timestamp`.
-
-### Patterns {#patterns}
-
-Structure of the `patterns` section:
-
-``` text
-pattern
-    regexp
-    function
-pattern
-    regexp
-    age + precision
-    ...
-pattern
-    regexp
-    function
-    age + precision
-    ...
-pattern
-    ...
-default
-    function
-    age + precision
-    ...
-```
-
-!!! warning "Attention"
-    Patterns must be strictly ordered:
-
-      1. Patterns without `function` or `retention`.
-      1. Patterns with both `function` and `retention`.
-      1. Pattern `default`.
-
-When processing a row, ClickHouse checks the rules in the `pattern` sections. Each of the `pattern` (including `default`) sections can contain the `function` parameter for aggregation, `retention` parameters, or both. If the metric name matches the `regexp`, the rules from the `pattern` section (or sections) are applied; otherwise, the rules from the `default` section are used.
-
-Fields for the `pattern` and `default` sections:
-
-- `regexp` – A pattern for the metric name.
-- `age` – The minimum age of the data in seconds.
-- `precision` – How precisely to define the age of the data in seconds. Should be a divisor for 86400 (seconds in a day).
-- `function` – The name of the aggregating function to apply to data whose age falls within the range `[age, age + precision]`.
-
-### Configuration Example {#configuration-example}
-
-``` xml
-<graphite_rollup>
-    <version_column_name>Version</version_column_name>
-    <pattern>
-        <regexp>click_cost</regexp>
-        <function>any</function>
-        <retention>
-            <age>0</age>
-            <precision>5</precision>
-        </retention>
-        <retention>
-            <age>86400</age>
-            <precision>60</precision>
-        </retention>
-    </pattern>
-    <default>
-        <function>max</function>
-        <retention>
-            <age>0</age>
-            <precision>60</precision>
-        </retention>
-        <retention>
-            <age>3600</age>
-            <precision>300</precision>
-        </retention>
-        <retention>
-            <age>86400</age>
-            <precision>3600</precision>
-        </retention>
-    </default>
-</graphite_rollup>
-```
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/graphitemergetree/)
diff --git a/docs/es/engines/table-engines/mergetree-family/index.md b/docs/es/engines/table-engines/mergetree-family/index.md
deleted file mode 100644
index 359d58b2ff1..00000000000
--- a/docs/es/engines/table-engines/mergetree-family/index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_folder_title: MergeTree Family
-toc_priority: 28
----
-
-
diff --git a/docs/es/engines/table-engines/mergetree-family/mergetree.md b/docs/es/engines/table-engines/mergetree-family/mergetree.md
deleted file mode 100644
index a4bab840b52..00000000000
--- a/docs/es/engines/table-engines/mergetree-family/mergetree.md
+++ /dev/null
@@ -1,654 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 30
-toc_title: MergeTree
----
-
-# MergeTree {#table_engines-mergetree}
-
-The `MergeTree` engine and the other engines of this family (`*MergeTree`) are the most robust ClickHouse table engines.
-
-Engines in the `MergeTree` family are designed for inserting a very large amount of data into a table. The data is quickly written to the table part by part, then rules are applied for merging the parts in the background. This method is much more efficient than continually rewriting the data in storage during insert.
-
-Main features:
-
-- Stores data sorted by primary key.
-
-    This allows you to create a small sparse index that helps find data faster.
-
-- Partitions can be used if the [partitioning key](custom-partitioning-key.md) is specified.
-
-    ClickHouse supports certain operations with partitions that are more effective than general operations on the same data with the same result. ClickHouse also automatically cuts off the partition data where the partitioning key is specified in the query. This also improves query performance.
-
-- Data replication support.
-
-    The family of `ReplicatedMergeTree` tables provides data replication. For more information, see [Data replication](replication.md).
-
-- Data sampling support.
-
-    If necessary, you can set the data sampling method in the table.
-
-!!! info "INFO"
-    The [Merge](../special/merge.md#merge) engine does not belong to the `*MergeTree` family.
-
-## Creating a Table {#table_engine-mergetree-creating-a-table}
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
-    ...
-    INDEX index_name1 expr1 TYPE type1(...) GRANULARITY value1,
-    INDEX index_name2 expr2 TYPE type2(...) GRANULARITY value2
-) ENGINE = MergeTree()
-[PARTITION BY expr]
-[ORDER BY expr]
-[PRIMARY KEY expr]
-[SAMPLE BY expr]
-[TTL expr [DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'], ...]
-[SETTINGS name=value, ...]
-```
-
-For a description of the parameters, see the [CREATE query description](../../../sql-reference/statements/create.md).
-
-!!! note "Note"
-    `INDEX` is an experimental feature, see [Data Skipping Indexes](#table_engine-mergetree-data_skipping-indexes).
-
-### Query Clauses {#mergetree-query-clauses}
-
-- `ENGINE` — Name and parameters of the engine. `ENGINE = MergeTree()`. The `MergeTree` engine does not have parameters.
-
-- `PARTITION BY` — The [partitioning key](custom-partitioning-key.md).
-
-    For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](../../../sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
-
-- `ORDER BY` — The sorting key.
-
-    A tuple of columns or arbitrary expressions. Example: `ORDER BY (CounterID, EventDate)`.
-
-- `PRIMARY KEY` — The primary key if it [differs from the sorting key](#choosing-a-primary-key-that-differs-from-the-sorting-key).
-
-    By default the primary key is the same as the sorting key (which is specified by the `ORDER BY` clause). Thus in most cases it is unnecessary to specify a separate `PRIMARY KEY` clause.
-
-- `SAMPLE BY` — An expression for sampling.
-
-    If a sampling expression is used, the primary key must contain it. Example: `SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID))`.
-
-- `TTL` — A list of rules specifying storage duration of rows and defining logic of automatic parts movement [between disks and volumes](#table_engine-mergetree-multiple-volumes).
-
-    The expression must result in a `Date` or `DateTime` column. Example:
-    `TTL date + INTERVAL 1 DAY`
-
-    The type of the rule `DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'` specifies an action to be done with the part if the expression is satisfied (reaches the current time): removal of expired rows, moving a part (if the expression is satisfied for all rows in a part) to the specified disk (`TO DISK 'xxx'`) or to the volume (`TO VOLUME 'xxx'`). The default type of the rule is removal (`DELETE`). A list of multiple rules can be specified, but there should be no more than one `DELETE` rule.
-
-    For more details, see [TTL for columns and tables](#table_engine-mergetree-ttl)
-
-- `SETTINGS` — Additional parameters that control the behavior of the `MergeTree`:
-
-    - `index_granularity` — Maximum number of data rows between the marks of an index. Default value: 8192. See [Data Storage](#mergetree-data-storage).
-    - `index_granularity_bytes` — Maximum size of data granules in bytes. Default value: 10Mb. To restrict the granule size only by the number of rows, set to 0 (not recommended). See [Data Storage](#mergetree-data-storage).
-    - `enable_mixed_granularity_parts` — Enables or disables transitioning to control the granule size with the `index_granularity_bytes` setting. Before version 19.11, there was only the `index_granularity` setting for restricting granule size. The `index_granularity_bytes` setting improves ClickHouse performance when selecting data from tables with big rows (tens and hundreds of megabytes). If you have tables with big rows, you can enable this setting for the tables to improve the efficiency of `SELECT` queries.
-    - `use_minimalistic_part_header_in_zookeeper` — Storage method of the data parts headers in ZooKeeper. If `use_minimalistic_part_header_in_zookeeper=1`, then ZooKeeper stores less data.
-        For more information, see the [setting description](../../../operations/server-configuration-parameters/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) in “Server configuration parameters”.
-    - `min_merge_bytes_to_use_direct_io` — The minimum data volume for a merge operation that is required for using direct I/O access to the storage disk. When merging data parts, ClickHouse calculates the total storage volume of all the data to be merged. If the volume exceeds `min_merge_bytes_to_use_direct_io` bytes, ClickHouse reads and writes the data to the storage disk using the direct I/O interface (`O_DIRECT` option). If `min_merge_bytes_to_use_direct_io = 0`, then direct I/O is disabled. Default value: `10 * 1024 * 1024 * 1024` bytes.
-
-    - `merge_with_ttl_timeout` — Minimum delay in seconds before repeating a merge with TTL. Default value: 86400 (1 day).
-    - `write_final_mark` — Enables or disables writing the final index mark at the end of a data part (after the last byte). Default value: 1. Don't turn it off.
-    - `merge_max_block_size` — Maximum number of rows in a block for merge operations. Default value: 8192.
-    - `storage_policy` — Storage policy. See [Using Multiple Block Devices for Data Storage](#table_engine-mergetree-multiple-volumes).
-
-**Example of sections setting**
-
-``` sql
-ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity=8192
-```
-
-In the example, we set partitioning by month.
-
-We also set an expression for sampling as a hash by the user ID. This allows you to pseudorandomize the data in the table for each `CounterID` and `EventDate`. If you define a [SAMPLE](../../../sql-reference/statements/select/sample.md#select-sample-clause) clause when selecting the data, ClickHouse will return an evenly pseudorandom data sample for a subset of users.
-
-The `index_granularity` setting can be omitted because 8192 is the default value.
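-
-For instance, with the sampling expression configured above, a query can read a deterministic subset of users (a sketch; `table_name` stands in for a real table):
-
-``` sql
--- Reads roughly 1/10 of all UserIDs thanks to SAMPLE BY intHash32(UserID);
--- scale aggregates accordingly (for example, multiply the count by 10).
-SELECT count() FROM table_name SAMPLE 1/10 WHERE CounterID = 34
-```
-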
-<details markdown="1">
-
-<summary>Deprecated method for creating a table</summary>
-
-!!! attention "Attention"
-    Do not use this method in new projects. If possible, switch old projects to the method described above.
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-) ENGINE [=] MergeTree(date-column [, sampling_expression], (primary, key), index_granularity)
-```
-
-**MergeTree() parameters**
-
-- `date-column` — The name of a column of the [Date](../../../sql-reference/data-types/date.md) type. ClickHouse automatically creates partitions by month based on this column. The partition names are in the `"YYYYMM"` format.
-- `sampling_expression` — An expression for sampling.
-- `(primary, key)` — Primary key. Type: [Tuple()](../../../sql-reference/data-types/tuple.md)
-- `index_granularity` — The granularity of an index. The number of data rows between the “marks” of an index. The value 8192 is appropriate for most tasks.
-
-**Example**
-
-``` sql
-MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)), 8192)
-```
-
-The `MergeTree` engine is configured in the same way as in the example above for the main engine configuration method.
-</details>
-
-## Data Storage {#mergetree-data-storage}
-
-A table consists of data parts sorted by primary key.
-
-When data is inserted into a table, separate data parts are created, and each of them is lexicographically sorted by primary key. For example, if the primary key is `(CounterID, Date)`, the data in the part is sorted by `CounterID`, and within each `CounterID`, it is ordered by `Date`.
-
-Data belonging to different partitions is separated into different parts. In the background, ClickHouse merges data parts for more efficient storage. Parts belonging to different partitions are not merged. The merge mechanism does not guarantee that all rows with the same primary key will be in the same data part.
-
-Each data part is logically divided into granules. A granule is the smallest indivisible data set that ClickHouse reads when selecting data. ClickHouse does not split rows or values, so each granule always contains an integer number of rows. The first row of a granule is marked with the value of the primary key for that row. For each data part, ClickHouse creates an index file that stores the marks. For each column, whether it is in the primary key or not, ClickHouse also stores the same marks. These marks let you find data directly in column files.
-
-The granule size is restricted by the `index_granularity` and `index_granularity_bytes` settings of the table engine. The number of rows in a granule lies in the `[1, index_granularity]` range, depending on the size of the rows. The size of a granule can exceed `index_granularity_bytes` if the size of a single row is greater than the value of the setting. In this case, the size of the granule equals the size of the row.
-
-## Primary Keys and Indexes in Queries {#primary-keys-and-indexes-in-queries}
-
-Take the `(CounterID, Date)` primary key as an example. In this case, the sorting and index can be illustrated as follows:
-
-      Whole data:     [---------------------------------------------]
-      CounterID:      [aaaaaaaaaaaaaaaaaabbbbcdeeeeeeeeeeeeefgggggggghhhhhhhhhiiiiiiiiikllllllll]
-      Date:           [1111111222222233331233211111222222333211111112122222223111112223311122333]
-      Marks:           |      |      |      |      |      |      |      |      |      |      |
-                      a,1    a,2    a,3    b,3    e,2    e,3    g,1    h,2    i,1    i,3    l,3
-      Marks numbers:   0      1      2      3      4      5      6      7      8      9      10
-
-If the data query specifies:
-
-- `CounterID in ('a', 'h')`, the server reads the data in the ranges of marks `[0, 3)` and `[6, 8)`.
-- `CounterID IN ('a', 'h') AND Date = 3`, the server reads the data in the ranges of marks `[1, 3)` and `[7, 8)`.
-- `Date = 3`, the server reads the data in the range of marks `[1, 10]`.
-
-The examples above show that it is always more effective to use an index than a full scan.
-
-A sparse index allows extra data to be read. When reading a single range of the primary key, up to `index_granularity * 2` extra rows in each data block can be read.
-
-Sparse indexes allow you to work with a very large number of table rows, because in most cases such indexes fit in the computer's RAM.
-
-ClickHouse does not require a unique primary key. You can insert multiple rows with the same primary key.
-
-### Selecting the Primary Key {#selecting-the-primary-key}
-
-The number of columns in the primary key is not explicitly limited.
-Depending on the data structure, you can include more or fewer columns in the primary key. This may:
-
-- Improve the performance of an index.
-
-    If the primary key is `(a, b)`, then adding another column `c` will improve the performance if the following conditions are met:
-
-    - There are queries with a condition on column `c`.
-    - Long data ranges (several times longer than the `index_granularity`) with identical values for `(a, b)` are common. In other words, adding another column allows you to skip quite long data ranges.
-
-- Improve data compression.
-
-    ClickHouse sorts data by primary key, so the higher the consistency, the better the compression.
-
-- Provide additional logic when merging data parts in the [CollapsingMergeTree](collapsingmergetree.md#table_engine-collapsingmergetree) and [SummingMergeTree](summingmergetree.md) engines.
-
-    In this case, it makes sense to specify a *sorting key* that is different from the primary key.
-
-A long primary key will negatively affect the insert performance and memory consumption, but extra columns in the primary key do not affect ClickHouse performance during `SELECT` queries.
-
-### Choosing a Primary Key That Differs from the Sorting Key {#choosing-a-primary-key-that-differs-from-the-sorting-key}
-
-It is possible to specify a primary key (an expression with values that are written in the index file for each mark) that is different from the sorting key (an expression for sorting the rows in data parts). In this case, the primary key expression tuple must be a prefix of the sorting key expression tuple.
-
-This feature is helpful when using the [SummingMergeTree](summingmergetree.md) and
-[AggregatingMergeTree](aggregatingmergetree.md) table engines. In a common case when using these engines, the table has two types of columns: *dimensions* and *measures*. Typical queries aggregate the values of measure columns with `GROUP BY` and filter by dimensions. Because SummingMergeTree and AggregatingMergeTree aggregate rows with the same value of the sorting key, it is natural to add all dimensions to it. As a result, the key expression consists of a long list of columns, and this list must be frequently updated with newly added dimensions.
-
-In this case, it makes sense to leave only a few columns in the primary key that will provide efficient range scans and to add the remaining dimension columns to the sorting key tuple.
-
-[ALTER](../../../sql-reference/statements/alter.md) of the sorting key is a lightweight operation, because when a new column is simultaneously added to the table and to the sorting key, the existing data parts do not need to be changed. Since the old sorting key is a prefix of the new sorting key and there is no data in the newly added column, the data is sorted by both the old and the new sorting keys at the moment of the table modification.
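-
-A minimal sketch of this setup (hypothetical table and column names): the primary key keeps only the short prefix used for range scans, while the sorting key carries the full dimension list.
-
-``` sql
-CREATE TABLE agg_metrics
-(
-    CounterID UInt32,
-    EventDate Date,
-    Region String,
-    Device String,
-    Hits UInt64
-)
-ENGINE = SummingMergeTree()
-PARTITION BY toYYYYMM(EventDate)
--- The full dimension list determines how rows are summed together...
-ORDER BY (CounterID, EventDate, Region, Device)
--- ...but the sparse index stores only the scan-friendly prefix.
-PRIMARY KEY (CounterID, EventDate);
-```
-
-Adding a new dimension later only extends `ORDER BY`; the index built from `PRIMARY KEY` stays untouched.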
-
-### Use of Indexes and Partitions in Queries {#use-of-indexes-and-partitions-in-queries}
-
-For `SELECT` queries, ClickHouse analyzes whether an index can be used. An index can be used if the `WHERE/PREWHERE` clause has an expression (as one of the conjunction elements, or entirely) that represents an equality or inequality comparison operation, or if it has `IN` or `LIKE` with a fixed prefix on columns or expressions that are in the primary key or partitioning key, or on certain partially repetitive functions of these columns, or logical relationships of these expressions.
-
-Thus, it is possible to quickly run queries on one or many ranges of the primary key. In this example, queries will be fast when run for a specific tracking tag, for a specific tag and date range, for a specific tag and date, for multiple tags with a date range, and so on.
-
-Let's look at the engine configured as follows:
-
-      ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate) SETTINGS index_granularity=8192
-
-In this case, in the queries:
-
-``` sql
-SELECT count() FROM table WHERE EventDate = toDate(now()) AND CounterID = 34
-SELECT count() FROM table WHERE EventDate = toDate(now()) AND (CounterID = 34 OR CounterID = 42)
-SELECT count() FROM table WHERE ((EventDate >= toDate('2014-01-01') AND EventDate <= toDate('2014-01-31')) OR EventDate = toDate('2014-05-01')) AND CounterID IN (101500, 731962, 160656) AND (CounterID = 101500 OR EventDate != toDate('2014-05-01'))
-```
-
-ClickHouse will use the primary key index to trim improper data and the monthly partitioning key to trim partitions that are in improper date ranges.
-
-The queries above show that the index is used even for complex expressions. Reading from the table is organized so that using the index cannot be slower than a full scan.
-
-In the example below, the index cannot be used.
-
-``` sql
-SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%'
-```
-
-To check whether ClickHouse can use the index when running a query, use the settings [force_index_by_date](../../../operations/settings/settings.md#settings-force_index_by_date) and [force_primary_key](../../../operations/settings/settings.md).
-
-The key for partitioning by month allows reading only those data blocks which contain dates from the proper range. In this case, the data block may contain data for many dates (up to an entire month). Within a block, data is sorted by primary key, which might not contain the date as the first column. Because of this, using a query with only a date condition that does not specify the primary key prefix will cause more data to be read than for a single date.
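-
-A small illustration of those settings (a sketch, shown as a session-level `SET`): with `force_primary_key` enabled, the server rejects queries that cannot use the primary key index instead of silently falling back to a full scan.
-
-``` sql
-SET force_primary_key = 1;
-
--- Uses the (CounterID, EventDate) index: accepted.
-SELECT count() FROM table WHERE CounterID = 34;
-
--- Cannot use the index because of the OR on a non-key column:
--- fails with an exception, making the missing index visible early.
-SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%';
-```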
-
-### Use of Index for Partially-Monotonic Primary Keys {#use-of-index-for-partially-monotonic-primary-keys}
-
-Consider, for example, the days of the month. They form a [monotonic sequence](https://en.wikipedia.org/wiki/Monotonic_function) for one month, but are not monotonic for longer periods. This is a partially-monotonic sequence. If a user creates a table with a partially-monotonic primary key, ClickHouse creates a sparse index as usual. When a user selects data from this kind of table, ClickHouse analyzes the query conditions. If the user wants to get data between two marks of the index and both of these marks fall within one month, ClickHouse can use the index in this particular case, because it can calculate the distance between the parameters of a query and the index marks.
-
-ClickHouse cannot use an index if the values of the primary key in the query parameter range do not represent a monotonic sequence. In this case, ClickHouse uses the full scan method.
-
-ClickHouse uses this logic not only for sequences of days of the month, but for any primary key that represents a partially-monotonic sequence.
-
-### Data Skipping Indexes (experimental) {#table_engine-mergetree-data_skipping-indexes}
-
-The index declaration is in the columns section of the `CREATE` query.
-
-``` sql
-INDEX index_name expr TYPE type(...) GRANULARITY granularity_value
-```
-
-For tables from the `*MergeTree` family, data skipping indexes can be specified.
-
-These indexes aggregate some information about the specified expression on blocks, which consist of `granularity_value` granules (the size of the granule is specified using the `index_granularity` setting in the table engine). Then these aggregates are used in `SELECT` queries for reducing the amount of data to read from the disk by skipping big blocks of data where the `where` query cannot be satisfied.
-
-**Example**
-
-``` sql
-CREATE TABLE table_name
-(
-    u64 UInt64,
-    i32 Int32,
-    s String,
-    ...
-    INDEX a (u64 * i32, s) TYPE minmax GRANULARITY 3,
-    INDEX b (u64 * length(s)) TYPE set(1000) GRANULARITY 4
-) ENGINE = MergeTree()
-...
-```
-
-ClickHouse can use the indexes from the example to reduce the amount of data read from the disk in the following queries:
-
-``` sql
-SELECT count() FROM table WHERE s < 'z'
-SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234
-```
-
-#### Available Types of Indices {#available-types-of-indices}
-
-- `minmax`
-
-    Stores the extremes of the specified expression (if the expression is a `tuple`, then it stores the extremes for each element of the `tuple`); uses the stored info for skipping blocks of data like the primary key.
-
-- `set(max_rows)`
-
-    Stores unique values of the specified expression (no more than `max_rows` rows; `max_rows=0` means “no limits”). Uses the values to check whether the `WHERE` expression cannot be satisfied on a block of data.
-
-- `ngrambf_v1(n, size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)`
-
-    Stores a [Bloom filter](https://en.wikipedia.org/wiki/Bloom_filter) that contains all ngrams from a block of data. Works only with strings. Can be used for the optimization of `equals`, `like` and `in` expressions.
-
-    - `n` — ngram size,
-    - `size_of_bloom_filter_in_bytes` — Bloom filter size in bytes (you can use large values here, for example, 256 or 512, because it can be compressed well).
-    - `number_of_hash_functions` — The number of hash functions used in the Bloom filter.
-    - `random_seed` — The seed for Bloom filter hash functions.
-
-- `tokenbf_v1(size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)`
-
-    The same as `ngrambf_v1`, but stores tokens instead of ngrams. Tokens are sequences separated by non-alphanumeric characters.
-
-- `bloom_filter([false_positive])` — Stores a [Bloom filter](https://en.wikipedia.org/wiki/Bloom_filter) for the specified columns.
-
-    The optional `false_positive` parameter is the probability of receiving a false positive response from the filter. Possible values: (0, 1). Default value: 0.025.
-
-    Supported data types: `Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `String`, `FixedString`, `Array`, `LowCardinality`, `Nullable`.
-
-    The following functions can use it: [equals](../../../sql-reference/functions/comparison-functions.md), [notEquals](../../../sql-reference/functions/comparison-functions.md), [in](../../../sql-reference/functions/in-functions.md), [notIn](../../../sql-reference/functions/in-functions.md), [has](../../../sql-reference/functions/array-functions.md).
-
-<!-- -->
-
-``` sql
-INDEX sample_index (u64 * length(s)) TYPE minmax GRANULARITY 4
-INDEX sample_index2 (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARITY 4
-INDEX sample_index3 (lower(str), str) TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY 4
-```
-
-#### Functions Support {#functions-support}
-
-Conditions in the `WHERE` clause contain calls of functions that operate with columns. If the column is a part of an index, ClickHouse tries to use this index when performing the functions. ClickHouse supports different subsets of functions for using indexes.
-
-The `set` index can be used with all functions. Function subsets for the other indexes are shown in the table below.
-
-| Function (operator) / Index                                                                                 | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter |
-|-------------------------------------------------------------------------------------------------------------|-------------|--------|------------|------------|--------------|
-| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#function-equals)                   | ✔           | ✔      | ✔          | ✔          | ✔            |
-| [notEquals(!=, \<\>)](../../../sql-reference/functions/comparison-functions.md#function-notequals)           | ✔           | ✔      | ✔          | ✔          | ✔            |
-| [like](../../../sql-reference/functions/string-search-functions.md#function-like)                            | ✔           | ✔      | ✔          | ✗          | ✗            |
-| [notLike](../../../sql-reference/functions/string-search-functions.md#function-notlike)                      | ✔           | ✔      | ✔          | ✗          | ✗            |
-| [startsWith](../../../sql-reference/functions/string-functions.md#startswith)                                | ✔           | ✔      | ✔          | ✔          | ✗            |
-| [endsWith](../../../sql-reference/functions/string-functions.md#endswith)                                    | ✗           | ✗      | ✔          | ✔          | ✗            |
-| [multiSearchAny](../../../sql-reference/functions/string-search-functions.md#function-multisearchany)        | ✗           | ✗      | ✔          | ✗          | ✗            |
-| [in](../../../sql-reference/functions/in-functions.md#in-functions)                                          | ✔           | ✔      | ✔          | ✔          | ✔            |
-| [notIn](../../../sql-reference/functions/in-functions.md#in-functions)                                       | ✔           | ✔      | ✔          | ✔          | ✔            |
-| [less (\<)](../../../sql-reference/functions/comparison-functions.md#function-less)                          | ✔           | ✔      | ✗          | ✗          | ✗            |
-| [greater (\>)](../../../sql-reference/functions/comparison-functions.md#function-greater)                    | ✔           | ✔      | ✗          | ✗          | ✗            |
-| [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#function-lessorequals)         | ✔           | ✔      | ✗          | ✗          | ✗            |
-| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#function-greaterorequals)   | ✔           | ✔      | ✗          | ✗          | ✗            |
-| [empty](../../../sql-reference/functions/array-functions.md#function-empty)                                  | ✔           | ✔      | ✗          | ✗          | ✗            |
-| [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty)                            | ✔           | ✔      | ✗          | ✗          | ✗            |
-| hasToken                                                                                                     | ✗           | ✗      | ✗          | ✔          | ✗            |
-
-Functions with a constant argument that is less than the ngram size can't be used by `ngrambf_v1` for query optimization.
-
-Bloom filters can have false positive matches, so the `ngrambf_v1`, `tokenbf_v1`, and `bloom_filter` indexes cannot be used for optimizing queries where the result of a function is expected to be false, for example:
-
-- Can be optimized:
-    - `s LIKE '%test%'`
-    - `NOT s NOT LIKE '%test%'`
-    - `s = 1`
-    - `NOT s != 1`
-    - `startsWith(s, 'test')`
-- Can't be optimized:
-    - `NOT s LIKE '%test%'`
-    - `s NOT LIKE '%test%'`
-    - `NOT s = 1`
-    - `s != 1`
-    - `NOT startsWith(s, 'test')`
-
-## Concurrent Data Access {#concurrent-data-access}
-
-For concurrent table access, we use multi-versioning. In other words, when a table is simultaneously read and updated, data is read from a set of parts that is current at the time of the query. There are no lengthy locks. Inserts do not get in the way of read operations.
-
-Reading from a table is automatically parallelized.
-
-## TTL for Columns and Tables {#table_engine-mergetree-ttl}
-
-Determines the lifetime of values.
-
-The `TTL` clause can be set for the whole table and for each individual column. Table-level TTL can also specify the logic of automatic data movement between disks and volumes.
-
-Expressions must evaluate to the [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md) data type.
-
-Example:
-
-``` sql
-TTL time_column
-TTL time_column + interval
-```
-
-To define `interval`, use [time interval](../../../sql-reference/operators/index.md#operators-datetime) operators.
-
-``` sql
-TTL date_time + INTERVAL 1 MONTH
-TTL date_time + INTERVAL 15 HOUR
-```
-
-### Column TTL {#mergetree-column-ttl}
-
-When the values in the column expire, ClickHouse replaces them with the default values for the column data type. If all the column values in the data part expire, ClickHouse deletes this column from the data part in the filesystem.
-
-The `TTL` clause can't be used for key columns.
-
-Examples:
-
-Creating a table with TTL
-
-``` sql
-CREATE TABLE example_table
-(
-    d DateTime,
-    a Int TTL d + INTERVAL 1 MONTH,
-    b Int TTL d + INTERVAL 1 MONTH,
-    c String
-)
-ENGINE = MergeTree
-PARTITION BY toYYYYMM(d)
-ORDER BY d;
-```
-
-Adding TTL to a column of an existing table
-
-``` sql
-ALTER TABLE example_table
-    MODIFY COLUMN
-    c String TTL d + INTERVAL 1 DAY;
-```
-
-Altering TTL of the column
-
-``` sql
-ALTER TABLE example_table
-    MODIFY COLUMN
-    c String TTL d + INTERVAL 1 MONTH;
-```
-
-### Table TTL {#mergetree-table-ttl}
-
-A table can have an expression for the removal of expired rows and multiple expressions for the automatic movement of parts between [disks or volumes](#table_engine-mergetree-multiple-volumes). When rows in the table expire, ClickHouse deletes all the corresponding rows. For the part-moving feature, all rows of a part must satisfy the movement expression criteria.
-
-``` sql
-TTL expr [DELETE|TO DISK 'aaa'|TO VOLUME 'bbb'], ...
-```
-
-The type of TTL rule may follow each TTL expression. It affects the action which is to be done once the expression is satisfied (reaches the current time):
-
-- `DELETE` - delete expired rows (default action);
-- `TO DISK 'aaa'` - move the part to the disk `aaa`;
-- `TO VOLUME 'bbb'` - move the part to the volume `bbb`.
-
-Examples:
-
-Creating a table with TTL
-
-``` sql
-CREATE TABLE example_table
-(
-    d DateTime,
-    a Int
-)
-ENGINE = MergeTree
-PARTITION BY toYYYYMM(d)
-ORDER BY d
-TTL d + INTERVAL 1 MONTH [DELETE],
-    d + INTERVAL 1 WEEK TO VOLUME 'aaa',
-    d + INTERVAL 2 WEEK TO DISK 'bbb';
-```
-
-Altering TTL of the table
-
-``` sql
-ALTER TABLE example_table
-    MODIFY TTL d + INTERVAL 1 DAY;
-```
-
-**Removing Data**
-
-Data with an expired TTL is removed when ClickHouse merges data parts.
-
-When ClickHouse sees that data is expired, it performs an off-schedule merge. To control the frequency of such merges, you can set `merge_with_ttl_timeout`. If the value is too low, it will perform many off-schedule merges that may consume a lot of resources.
-
-If you perform the `SELECT` query between merges, you may get expired data. To avoid it, use the [OPTIMIZE](../../../sql-reference/statements/misc.md#misc_operations-optimize) query before `SELECT`.
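-
-As a minimal sketch of that workaround (reusing `example_table` from above; `FINAL` merely forces the off-schedule merge described earlier):
-
-``` sql
-OPTIMIZE TABLE example_table FINAL;
-
--- After the forced merge, rows with an expired TTL are gone
-SELECT * FROM example_table;
-```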
-
-## Using Multiple Block Devices for Data Storage {#table_engine-mergetree-multiple-volumes}
-
-### Introduction {#introduction}
-
-`MergeTree` family table engines can store data on multiple block devices. For example, it can be useful when the data of a certain table are implicitly split into "hot" and "cold". The most recent data is regularly requested but requires only a small amount of space. On the contrary, the fat-tailed historical data is requested rarely. If several disks are available, the "hot" data may be located on fast disks (for example, NVMe SSDs or in memory), while the "cold" data - on relatively slow ones (for example, HDD).
-
-The data part is the minimum movable unit for `MergeTree`-engine tables. The data belonging to one part are stored on one disk. Data parts can be moved between disks in the background (according to user settings) as well as by means of the [ALTER](../../../sql-reference/statements/alter.md#alter_move-partition) queries.
-
-### Terms {#terms}
-
-- Disk — Block device mounted to the filesystem.
-- Default disk — Disk that stores the path specified in the [path](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-path) server setting.
-- Volume — Ordered set of equal disks (similar to [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures)).
-- Storage policy — Set of volumes and the rules for moving data between them.
-
-The names given to the described entities can be found in the system tables, [system.storage_policies](../../../operations/system-tables.md#system_tables-storage_policies) and [system.disks](../../../operations/system-tables.md#system_tables-disks). To apply one of the configured storage policies for a table, use the `storage_policy` setting of `MergeTree`-engine family tables.
-
-### Configuration {#table_engine-mergetree-multiple-volumes_configure}
-
-Disks, volumes and storage policies should be declared inside the `<storage_configuration>` tag, either in the main file `config.xml` or in a distinct file in the `config.d` directory.
-
-Configuration structure:
-
-``` xml
-<storage_configuration>
-    <disks>
-        <disk_name_1> <!-- disk name -->
-            <path>/mnt/fast_ssd/clickhouse/</path>
-        </disk_name_1>
-        <disk_name_2>
-            <path>/mnt/hdd1/clickhouse/</path>
-            <keep_free_space_bytes>10485760</keep_free_space_bytes>
-        </disk_name_2>
-        <disk_name_3>
-            <path>/mnt/hdd2/clickhouse/</path>
-            <keep_free_space_bytes>10485760</keep_free_space_bytes>
-        </disk_name_3>
-
-        ...
-    </disks>
-
-    ...
-</storage_configuration>
-```
-
-Tags:
-
-- `<disk_name_N>` — Disk name. Names must be different for all disks.
-- `path` — path under which a server will store data (`data` and `shadow` folders), should be terminated with '/'.
-- `keep_free_space_bytes` — the amount of free disk space to be reserved.
-
-The order of the disk definition is not important.
-
-Storage policies configuration markup:
-
-``` xml
-<storage_configuration>
-    ...
-    <policies>
-        <policy_name_1>
-            <volumes>
-                <volume_name_1>
-                    <disk>disk_name_from_disks_configuration</disk>
-                    <max_data_part_size_bytes>1073741824</max_data_part_size_bytes>
-                </volume_name_1>
-                <volume_name_2>
-                    <!-- configuration -->
-                </volume_name_2>
-            </volumes>
-            <move_factor>0.2</move_factor>
-        </policy_name_1>
-
-        <policy_name_2>
-            <!-- configuration -->
-        </policy_name_2>
-
-        ...
-    </policies>
-    ...
-</storage_configuration>
-```
-
-Tags:
-
-- `policy_name_N` — Policy name. Policy names must be unique.
-- `volume_name_N` — Volume name. Volume names must be unique.
-- `disk` — a disk within a volume.
-- `max_data_part_size_bytes` — the maximum size of a part that can be stored on any of the volume's disks.
-- `move_factor` — when the amount of available space gets lower than this factor, data automatically starts to move on to the next volume if any (by default, 0.1).
-
-Configuration examples:
-
-``` xml
-<storage_configuration>
-    ...
-    <policies>
-        <hdd_in_order> <!-- policy name -->
-            <volumes>
-                <single> <!-- volume name -->
-                    <disk>disk1</disk>
-                    <disk>disk2</disk>
-                </single>
-            </volumes>
-        </hdd_in_order>
-
-        <moving_from_ssd_to_hdd>
-            <volumes>
-                <hot>
-                    <disk>fast_ssd</disk>
-                    <max_data_part_size_bytes>1073741824</max_data_part_size_bytes>
-                </hot>
-                <cold>
-                    <disk>disk1</disk>
-                </cold>
-            </volumes>
-            <move_factor>0.2</move_factor>
-        </moving_from_ssd_to_hdd>
-
-        ...
-    </policies>
-    ...
-</storage_configuration>
-```
-
-In the given example, the `hdd_in_order` policy implements the [round-robin](https://en.wikipedia.org/wiki/Round-robin_scheduling) approach. Thus this policy defines only one volume (`single`), and the data parts are stored on all its disks in circular order. Such a policy can be quite useful if there are several similar disks mounted to the system, but RAID is not configured. Keep in mind that each individual disk drive is not reliable, and you might want to compensate for it with a replication factor of 3 or more.
-
-If there are different kinds of disks available in the system, the `moving_from_ssd_to_hdd` policy can be used instead. The volume `hot` consists of an SSD disk (`fast_ssd`), and the maximum size of a part that can be stored on this volume is 1GB. All the parts with a size larger than 1GB will be stored directly on the `cold` volume, which contains an HDD disk `disk1`.
-Also, once the disk `fast_ssd` gets filled by more than 80%, data will be transferred to `disk1` by a background process.
-
-The order of volume enumeration within a storage policy is important. Once a volume is overfilled, data is moved to the next one. The order of disk enumeration is important as well, because data is stored on them in turns.
-
-When creating a table, one can apply one of the configured storage policies to it:
-
-``` sql
-CREATE TABLE table_with_non_default_policy (
-    EventDate Date,
-    OrderID UInt64,
-    BannerID UInt64,
-    SearchPhrase String
-) ENGINE = MergeTree
-ORDER BY (OrderID, BannerID)
-PARTITION BY toYYYYMM(EventDate)
-SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
-```
-
-The `default` storage policy implies using only one volume, which consists of only one disk given in `<path>`. Once a table is created, its storage policy cannot be changed.
-
-### Details {#details}
-
-In the case of `MergeTree` tables, data is getting to disk in different ways:
-
-- As a result of an insert (`INSERT` query).
-- During background merges and [mutations](../../../sql-reference/statements/alter.md#alter-mutations).
-- When downloading from another replica.
-- As a result of partition freezing [ALTER TABLE … FREEZE PARTITION](../../../sql-reference/statements/alter.md#alter_freeze-partition).
-
-In all of these cases except for mutations and partition freezing, a part is stored on a volume and a disk according to the given storage policy:
-
-1.  The first volume (in the order of definition) that has enough disk space for storing a part (`unreserved_space > current_part_size`) and allows for storing parts of a given size (`max_data_part_size_bytes > current_part_size`) is chosen.
-2.  Within this volume, that disk is chosen that follows the one which was used for storing the previous chunk of data and that has free space more than the part size (`unreserved_space - keep_free_space_bytes > current_part_size`).
-
-Under the hood, mutations and partition freezing make use of [hard links](https://en.wikipedia.org/wiki/Hard_link). Hard links between different disks are not supported, therefore in such cases the resulting parts are stored on the same disks as the initial ones.
-
-In the background, parts are moved between volumes on the basis of the amount of free space (the `move_factor` parameter) according to the order the volumes are declared in the configuration file.
-Data is never transferred from the last volume and into the first one. One may use the system tables [system.part_log](../../../operations/system-tables.md#system_tables-part-log) (field `type = MOVE_PART`) and [system.parts](../../../operations/system-tables.md#system_tables-parts) (fields `path` and `disk`) to monitor background moves. Also, detailed information can be found in the server logs.
-
-The user can force moving a part or a partition from one volume to another using the query [ALTER TABLE … MOVE PART\|PARTITION … TO VOLUME\|DISK …](../../../sql-reference/statements/alter.md#alter_move-partition); all the restrictions for background operations are taken into account. The query initiates a move on its own and does not wait for background operations to be completed. The user will get an error message if not enough free space is available or if any of the required conditions are not met.
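-
-An illustrative sketch of such a forced move (it reuses the table and the `moving_from_ssd_to_hdd` policy from the examples above; the partition value is hypothetical):
-
-``` sql
--- Move one partition of the example table to the 'cold' volume explicitly
-ALTER TABLE table_with_non_default_policy MOVE PARTITION 202105 TO VOLUME 'cold';
-```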
-
-Moving data does not interfere with data replication. Therefore, different storage policies can be specified for the same table on different replicas.
-
-After the completion of background merges and mutations, old parts are removed only after a certain amount of time (`old_parts_lifetime`).
-During this time, they are not moved to other volumes or disks. Therefore, until the parts are finally removed, they are still taken into account for evaluation of the occupied disk space.
-
-[Original article](https://clickhouse.tech/docs/ru/operations/table_engines/mergetree/)
diff --git a/docs/es/engines/table-engines/mergetree-family/replacingmergetree.md b/docs/es/engines/table-engines/mergetree-family/replacingmergetree.md
deleted file mode 100644
index a1e95c5b5f4..00000000000
--- a/docs/es/engines/table-engines/mergetree-family/replacingmergetree.md
+++ /dev/null
@@ -1,69 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 33
-toc_title: ReplacingMergeTree
----
-
-# ReplacingMergeTree {#replacingmergetree}
-
-The engine differs from [MergeTree](mergetree.md#table_engines-mergetree) in that it removes duplicate entries with the same primary key value (or, more accurately, with the same [sorting key](mergetree.md) value).
-
-Data deduplication occurs only during a merge. Merging occurs in the background at an unknown time, so you can't plan for it. Some of the data may remain unprocessed. Although you can run an unscheduled merge using the `OPTIMIZE` query, don't count on using it, because the `OPTIMIZE` query will read and write a large amount of data.
-
-Thus, `ReplacingMergeTree` is suitable for clearing out duplicate data in the background in order to save space, but it doesn't guarantee the absence of duplicates.
-
-## Creating a Table {#creating-a-table}
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-) ENGINE = ReplacingMergeTree([ver])
-[PARTITION BY expr]
-[ORDER BY expr]
-[PRIMARY KEY expr]
-[SAMPLE BY expr]
-[SETTINGS name=value, ...]
-```
-
-For a description of request parameters, see the [statement description](../../../sql-reference/statements/create.md).
-
-**ReplacingMergeTree Parameters**
-
-- `ver` — column with version. Type `UInt*`, `Date` or `DateTime`. Optional parameter.
-
-    When merging, `ReplacingMergeTree` from all the rows with the same primary key leaves only one:
-
-    - The last in the selection, if `ver` is not set.
-    - With the maximum version, if `ver` is specified.
-
-**Query clauses**
-
-When creating a `ReplacingMergeTree` table, the same [clauses](mergetree.md) are required as when creating a `MergeTree` table.
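-
-As a minimal end-to-end sketch of the deduplication behaviour described above (the table and data are illustrative, not from the original text):
-
-``` sql
-CREATE TABLE replacing_demo
-(
-    key   UInt64,
-    value String,
-    ver   UInt32
-)
-ENGINE = ReplacingMergeTree(ver)
-ORDER BY key;
-
-INSERT INTO replacing_demo VALUES (1, 'old', 1);
-INSERT INTO replacing_demo VALUES (1, 'new', 2);
-
--- Force an unscheduled merge (expensive on real data, see the note above)
-OPTIMIZE TABLE replacing_demo FINAL;
-
--- Only the row with the maximum version remains: (1, 'new', 2)
-SELECT * FROM replacing_demo;
-```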
-
-<details markdown="1">
-
-<summary>Deprecated Method for Creating a Table</summary>
-
-!!! attention "Attention"
-    Do not use this method in new projects and, if possible, switch old projects to the method described above.
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-) ENGINE [=] ReplacingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, [ver])
-```
-
-All of the parameters except `ver` have the same meaning as in `MergeTree`.
-
-- `ver` - column with the version. Optional parameter. For a description, see the text above.
-
-</details>
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/replacingmergetree/)
diff --git a/docs/es/engines/table-engines/mergetree-family/replication.md b/docs/es/engines/table-engines/mergetree-family/replication.md
deleted file mode 100644
index 505f5223800..00000000000
--- a/docs/es/engines/table-engines/mergetree-family/replication.md
+++ /dev/null
@@ -1,218 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 31
-toc_title: "Data Replication"
----
-
-# Data Replication {#table_engines-replication}
-
-Replication is only supported for tables in the MergeTree family:
-
-- ReplicatedMergeTree
-- ReplicatedSummingMergeTree
-- ReplicatedReplacingMergeTree
-- ReplicatedAggregatingMergeTree
-- ReplicatedCollapsingMergeTree
-- ReplicatedVersionedCollapsingMergeTree
-- ReplicatedGraphiteMergeTree
-
-Replication works at the level of an individual table, not the entire server. A server can store both replicated and non-replicated tables at the same time.
-
-Replication does not depend on sharding. Each shard has its own independent replication.
-
-Compressed data for `INSERT` and `ALTER` queries is replicated (for more information, see the documentation for [ALTER](../../../sql-reference/statements/alter.md#query_language_queries_alter)).
-
-`CREATE`, `DROP`, `ATTACH`, `DETACH` and `RENAME` queries are executed on a single server and are not replicated:
-
-- The `CREATE TABLE` query creates a new replicatable table on the server where the query is run. If this table already exists on other servers, it adds a new replica.
-- The `DROP TABLE` query deletes the replica located on the server where the query is run.
-- The `RENAME` query renames the table on one of the replicas. In other words, replicated tables can have different names on different replicas.
-
-ClickHouse uses [Apache ZooKeeper](https://zookeeper.apache.org) for storing replicas meta information. Use ZooKeeper version 3.4.5 or newer.
-
-To use replication, set parameters in the [zookeeper](../../../operations/server-configuration-parameters/settings.md#server-settings_zookeeper) server configuration section.
-
-!!! attention "Attention"
-    Don't neglect the security setting. ClickHouse supports the `digest` [ACL scheme](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl) of the ZooKeeper security subsystem.
-
-Example of setting the addresses of the ZooKeeper cluster:
-
-``` xml
-<zookeeper>
-    <node index="1">
-        <host>example1</host>
-        <port>2181</port>
-    </node>
-    <node index="2">
-        <host>example2</host>
-        <port>2181</port>
-    </node>
-    <node index="3">
-        <host>example3</host>
-        <port>2181</port>
-    </node>
-</zookeeper>
-```
-
-You can specify any existing ZooKeeper cluster, and the system will use a directory on it for its own data (the directory is specified when creating a replicatable table).
-
-If ZooKeeper isn't set in the config file, you can't create replicated tables, and any existing replicated tables will be read-only.
-
-ZooKeeper is not used in `SELECT` queries, because replication does not affect the performance of `SELECT` and queries run just as fast as they do for non-replicated tables.
-
-When querying distributed replicated tables, ClickHouse behavior is controlled by the settings [max_replica_delay_for_distributed_queries](../../../operations/settings/settings.md#settings-max_replica_delay_for_distributed_queries) and [fallback_to_stale_replicas_for_distributed_queries](../../../operations/settings/settings.md#settings-fallback_to_stale_replicas_for_distributed_queries).
-
-For each `INSERT` query, approximately ten entries are added to ZooKeeper through several transactions. (To be more precise, this is for each inserted block of data; an INSERT query contains one block, or one block per `max_insert_block_size = 1048576` rows.) This leads to slightly longer latencies for `INSERT` compared to non-replicated tables. But if you follow the recommendations to insert data in batches of no more than one `INSERT` per second, it doesn't create any problems. One ZooKeeper cluster used for coordinating the entire ClickHouse cluster has a total of several hundred `INSERTs` per second. The throughput on data inserts (the number of rows per second) is just as high as for non-replicated data.
-
-For very large clusters, you can use different ZooKeeper clusters for different shards. However, this hasn't proven necessary on the Yandex.Metrica cluster (approximately 300 servers).
-
-Replication is asynchronous and multi-master. `INSERT` queries (as well as `ALTER`) can be sent to any available server. Data is inserted on the server where the query is run, and then it is copied to the other servers. Because it is asynchronous, recently inserted data appears on the other replicas with some latency. If part of the replicas is not available, the data is written when they become available. If a replica is available, the latency is the amount of time it takes to transfer the block of compressed data over the network.
-
-By default, an INSERT query waits for confirmation of writing the data from only one replica. If the data was successfully written to only one replica and the server with this replica ceases to exist, the stored data will be lost. To enable getting confirmation of data writes from multiple replicas, use the `insert_quorum` option.
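-
-A minimal sketch of enabling that confirmation (the quorum value is illustrative; `table_name` refers to the replicated table created in the example below):
-
-``` sql
--- Require the write to be acknowledged by at least two replicas
-SET insert_quorum = 2;
-
-INSERT INTO table_name VALUES (now(), 1, 1);
-```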
-
-Each block of data is written atomically. The INSERT query is divided into blocks of up to `max_insert_block_size = 1048576` rows. In other words, if the `INSERT` query has less than 1048576 rows, it is made atomically.
-
-Data blocks are deduplicated. For multiple writes of the same data block (data blocks of the same size containing the same rows in the same order), the block is only written once. The reason for this is in case of network failures when the client application doesn't know whether the data was written to the DB, so the `INSERT` query can simply be repeated. It doesn't matter which replica the INSERTs with identical data were sent to. `INSERTs` are idempotent. Deduplication parameters are controlled by the [merge_tree](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-merge_tree) server settings.
-
-During replication, only the source data to insert is transferred over the network. Further data transformation (merging) is coordinated and performed on all replicas in the same way. This minimizes network usage, which means that replication works well when replicas reside in different datacenters. (Note that duplicating data in different datacenters is the main goal of replication.)
-
-You can have any number of replicas of the same data. Yandex.Metrica uses double replication in production. Each server uses RAID-5 or RAID-6, and RAID-10 in some cases. This is a relatively reliable and convenient solution.
-
-The system monitors data synchronicity on replicas and is able to recover after a failure. Failover is automatic (for small differences in data) or semi-automatic (when data differs too much, which may indicate a configuration error).
-
-## Creating Replicated Tables {#creating-replicated-tables}
-
-The `Replicated` prefix is added to the table engine name. For example: `ReplicatedMergeTree`.
-
-**Replicated\*MergeTree parameters**
-
-- `zoo_path` — The path to the table in ZooKeeper.
-- `replica_name` — The replica name in ZooKeeper.
-
-Example:
-
-``` sql
-CREATE TABLE table_name
-(
-    EventDate DateTime,
-    CounterID UInt32,
-    UserID UInt32
-) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/table_name', '{replica}')
-PARTITION BY toYYYYMM(EventDate)
-ORDER BY (CounterID, EventDate, intHash32(UserID))
-SAMPLE BY intHash32(UserID)
-```
-
-<details markdown="1">
-
-<summary>Example in deprecated syntax</summary>
-
-``` sql
-CREATE TABLE table_name
-(
-    EventDate DateTime,
-    CounterID UInt32,
-    UserID UInt32
-) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/table_name', '{replica}', EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID), EventTime), 8192)
-```
-
-</details>
-
-As the example shows, these parameters can contain substitutions in curly brackets. The substituted values are taken from the 'macros' section of the configuration file. Example:
-
-``` xml
-<macros>
-    <layer>05</layer>
-    <shard>02</shard>
-    <replica>example05-02-1.yandex.ru</replica>
-</macros>
-```
-
-The path to the table in ZooKeeper should be unique for each replicated table. Tables on different shards should have different paths.
-In this case, the path consists of the following parts:
-
-`/clickhouse/tables/` is the common prefix. We recommend using exactly this one.
-
-`{layer}-{shard}` is the shard identifier. In this example it consists of two parts, since the Yandex.Metrica cluster uses two-level sharding. For most tasks, you can leave just the {shard} substitution, which will be expanded to the shard identifier.
-
-`table_name` is the name of the node for the table in ZooKeeper. It is a good idea to make it the same as the table name. It is defined explicitly, because in contrast to the table name, it doesn't change after a RENAME query.
-*HINT*: you could add a database name in front of `table_name` as well, e.g. `db_name.table_name`.
-
-The replica name identifies different replicas of the same table. You can use the server name for this, as in the example. The name only needs to be unique within each shard.
-
-You can define the parameters explicitly instead of using substitutions. This might be convenient for testing and for configuring small clusters. However, you can't use distributed DDL queries (`ON CLUSTER`) in this case.
-
-When working with large clusters, we recommend using substitutions, because they reduce the probability of error.
-
-Run the `CREATE TABLE` query on each replica. This query creates a new replicated table, or adds a new replica to an existing one.
-
-If you add a new replica after the table already contains some data on other replicas, the data will be copied from the other replicas to the new one after running the query. In other words, the new replica syncs itself with the others.
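-
-Purely as an illustrative sketch (the cluster name is hypothetical, and it assumes the substitutions described above are configured on every host), the per-replica `CREATE TABLE` can also be issued once via the distributed DDL mentioned earlier:
-
-``` sql
--- Runs the CREATE on every host of the hypothetical 'logs' cluster;
--- {shard} and {replica} expand from each server's own macros section
-CREATE TABLE table_name ON CLUSTER logs
-(
-    EventDate DateTime,
-    CounterID UInt32,
-    UserID UInt32
-) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/table_name', '{replica}')
-PARTITION BY toYYYYMM(EventDate)
-ORDER BY (CounterID, EventDate, intHash32(UserID));
-```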
-
-To delete a replica, run `DROP TABLE`. However, only one replica is deleted – the one that resides on the server where you run the query.
-
-## Recovery After Failures {#recovery-after-failures}
-
-If ZooKeeper is unavailable when a server starts, replicated tables switch to read-only mode. The system periodically attempts to connect to ZooKeeper.
-
-If ZooKeeper is unavailable during an `INSERT`, or an error occurs when interacting with ZooKeeper, an exception is thrown.
-
-After connecting to ZooKeeper, the system checks whether the set of data in the local file system matches the expected set of data (ZooKeeper stores this information). If there are minor inconsistencies, the system resolves them by syncing data with the replicas.
-
-If the system detects broken data parts (with the wrong size of files) or unrecognized parts (parts written to the file system but not recorded in ZooKeeper), it moves them to the `detached` subdirectory (they are not deleted). Any missing parts are copied from the replicas.
-
-Note that ClickHouse does not perform any destructive actions such as automatically deleting a large amount of data.
-
-When the server starts (or establishes a new session with ZooKeeper), it only checks the quantity and sizes of all files. If the file sizes match but bytes have been changed somewhere in the middle, this is not detected immediately, but only when attempting to read the data for a `SELECT` query. The query throws an exception about a non-matching checksum or size of a compressed block. In this case, data parts are added to the verification queue and copied from the replicas if necessary.
-
-If the local set of data differs too much from the expected one, a safety mechanism is triggered. The server enters this in the log and refuses to launch. The reason for this is that this case may indicate a configuration error, such as a replica on one shard being accidentally configured like a replica on a different shard. However, the thresholds for this mechanism are set fairly low, and this situation might occur during normal failure recovery. In this case, data is restored semi-automatically - by "pushing a button".
-
-To start recovery, create the node `/path_to_table/replica_name/flags/force_restore_data` in ZooKeeper with any content, or run the command to restore all replicated tables:
-
-``` bash
-sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data
-```
-
-Then restart the server. On start, the server deletes these flags and starts recovery.
-
-## Recovery After Complete Data Loss {#recovery-after-complete-data-loss}
-
-If all data and metadata disappeared from one of the servers, follow these steps for recovery:
-
-1. Install ClickHouse on the server. Define substitutions correctly in the config file that contains the shard identifier and replicas, if you use them.
-2. If you had unreplicated tables that must be manually duplicated on the servers, copy their data from a replica (in the directory `/var/lib/clickhouse/data/db_name/table_name/`).
-3. Copy the table definitions located in `/var/lib/clickhouse/metadata/` from a replica. If a shard or replica identifier is defined explicitly in the table definitions, correct it so that it corresponds to this replica. (Alternatively, start the server and run all the `ATTACH TABLE` queries that should have been in the .sql files in `/var/lib/clickhouse/metadata/`.)
-4. To start recovery, create the ZooKeeper node `/path_to_table/replica_name/flags/force_restore_data` with any content, or run the command to restore all replicated tables: `sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data`
-
-Then start the server (restart it if it is already running). Data will be downloaded from replicas.
-
-An alternative recovery option is to delete the information about the lost replica from ZooKeeper (`/path_to_table/replica_name`), then create the replica again as described in "[Creating Replicated Tables](#creating-replicated-tables)".
-
-There is no restriction on network bandwidth during recovery. Keep this in mind if you are restoring many replicas at once.
-
-## Converting from MergeTree to ReplicatedMergeTree {#converting-from-mergetree-to-replicatedmergetree}
-
-We use the term `MergeTree` to refer to all table engines in the `MergeTree family`, the same as for `ReplicatedMergeTree`.
-
-If you had a `MergeTree` table that was manually replicated, you can convert it to a replicated table. You might need to do this if you have already collected a large amount of data in a `MergeTree` table and now want to enable replication.
-
-If the data differs on various replicas, first sync it, or delete this data on all the replicas except one.
-
-Rename the existing MergeTree table, then create a `ReplicatedMergeTree` table with the old name.
-Move the data from the old table to the `detached` subdirectory inside the directory with the new table data (`/var/lib/clickhouse/data/db_name/table_name/`).
-Then run `ALTER TABLE ATTACH PARTITION` on one of the replicas to add these data parts to the working set.
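-
-A sketch of the SQL side of these steps (all names are illustrative; the filesystem move of the old parts into the new table's `detached` directory still has to be done manually, as described above):
-
-``` sql
-RENAME TABLE db_name.events TO db_name.events_old;
-
-CREATE TABLE db_name.events
-(
-    EventDate Date,
-    UserID    UInt32
-) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/events', '{replica}')
-PARTITION BY toYYYYMM(EventDate)
-ORDER BY (EventDate, UserID);
-
--- After moving the old parts into the new table's detached/ subdirectory:
-ALTER TABLE db_name.events ATTACH PARTITION 201902;
-```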
-
-## Converting from ReplicatedMergeTree to MergeTree {#converting-from-replicatedmergetree-to-mergetree}
-
-Create a MergeTree table with a different name. Move all the data from the directory with the `ReplicatedMergeTree` table data to the new table's data directory. Then delete the `ReplicatedMergeTree` table and restart the server.
-
-If you want to get rid of a `ReplicatedMergeTree` table without launching the server:
-
-- Delete the corresponding `.sql` file in the metadata directory (`/var/lib/clickhouse/metadata/`).
-- Delete the corresponding path in ZooKeeper (`/path_to_table/replica_name`).
-
-After this, you can launch the server, create a `MergeTree` table, move the data to its directory, and then restart the server.
-
-## Recovery When Metadata in the ZooKeeper Cluster Is Lost or Damaged {#recovery-when-metadata-in-the-zookeeper-cluster-is-lost-or-damaged}
-
-If the data in ZooKeeper was lost or damaged, you can save the data by moving it to an unreplicated table as described above.
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/replication/)
diff --git a/docs/es/engines/table-engines/mergetree-family/summingmergetree.md b/docs/es/engines/table-engines/mergetree-family/summingmergetree.md
deleted file mode 100644
index 3ae9a1515c0..00000000000
--- a/docs/es/engines/table-engines/mergetree-family/summingmergetree.md
+++ /dev/null
@@ -1,141 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 34
-toc_title: SummingMergeTree
----
-
-# SummingMergeTree {#summingmergetree}
-
-The engine inherits from [MergeTree](mergetree.md#table_engines-mergetree). The difference is that when merging data parts for `SummingMergeTree` tables, ClickHouse replaces all the rows with the same primary key (or, more accurately, with the same [sorting key](mergetree.md)) with one row which contains summarized values for the columns with a numeric data type. If the sorting key is composed in a way that a single key value corresponds to a large number of rows, this significantly reduces storage volume and speeds up data selection.
-
-We recommend using the engine together with `MergeTree`. Store complete data in a `MergeTree` table, and use `SummingMergeTree` for storing aggregated data, for example, when preparing reports. Such an approach will prevent you from losing valuable data due to an incorrectly composed primary key.
-
-## Creating a Table {#creating-a-table}
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-) ENGINE = SummingMergeTree([columns])
-[PARTITION BY expr]
-[ORDER BY expr]
-[SAMPLE BY expr]
-[SETTINGS name=value, ...]
-```
-
-For a description of request parameters, see the [request description](../../../sql-reference/statements/create.md).
-
-**Parameters of SummingMergeTree**
-
-- `columns` - a tuple with the names of columns where values will be summarized. Optional parameter.
-    The columns must be of a numeric type and must not be in the primary key.
-
-    If `columns` is not specified, ClickHouse summarizes the values in all columns with a numeric data type that are not in the primary key.
-
-**Query clauses**
-
-When creating a `SummingMergeTree` table, the same [clauses](mergetree.md) are required as when creating a `MergeTree` table.
-
-<details markdown="1">
-
-<summary>Deprecated Method for Creating a Table</summary>
-
-!!! attention "Attention"
-    Do not use this method in new projects and, if possible, switch old projects to the method described above.
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-) ENGINE [=] SummingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, [columns])
-```
-
-All of the parameters except `columns` have the same meaning as in `MergeTree`.
-
-- `columns` — tuple with names of columns whose values will be summarized. Optional parameter. For a description, see the text above.
-
-</details>
-
-## Usage Example {#usage-example}
-
-Consider the following table:
-
-``` sql
-CREATE TABLE summtt
-(
-    key UInt32,
-    value UInt32
-)
-ENGINE = SummingMergeTree()
-ORDER BY key
-```
-
-Insert data into it:
-
-``` sql
-INSERT INTO summtt Values(1,1),(1,2),(2,1)
-```
-
-ClickHouse may sum all the rows not completely ([see below](#data-processing)), so we use the aggregate function `sum` and a `GROUP BY` clause in the query.
-
-``` sql
-SELECT key, sum(value) FROM summtt GROUP BY key
-```
-
-``` text
-┌─key─┬─sum(value)─┐
-│   2 │          1 │
-│   1 │          3 │
-└─────┴────────────┘
-```
-
-## Data Processing {#data-processing}
-
-When data is inserted into a table, it is saved as-is. ClickHouse merges the inserted parts of data periodically, and this is when rows with the same primary key are summed and replaced with one row for each resulting part of data.
-
-ClickHouse can merge the data parts so that different resulting parts of data can consist of rows with the same primary key, i.e. the summation will be incomplete. Therefore, during selection (`SELECT`), an aggregate function [sum()](../../../sql-reference/aggregate-functions/reference.md#agg_function-sum) and a `GROUP BY` clause should be used in a query, as described in the example above.
-
-### Common Rules for Summation {#common-rules-for-summation}
-
-The values in the columns with a numeric data type are summarized. The set of columns is defined by the parameter `columns`.
-
-If the values were 0 in all of the columns for summation, the row is deleted.
-
-If a column is not in the primary key and is not summarized, an arbitrary value is selected from the existing ones.
-
-The values are not summarized for columns in the primary key.
-
-### The Summation in the AggregateFunction Columns {#the-summation-in-the-aggregatefunction-columns}
-
-For columns of the [AggregateFunction type](../../../sql-reference/data-types/aggregatefunction.md), ClickHouse behaves as the [AggregatingMergeTree](aggregatingmergetree.md) engine, aggregating according to the function.
-
-### Nested Structures {#nested-structures}
-
-A table can have nested data structures that are processed in a special way.
-
-If the name of a nested table ends with `Map` and it contains at least two columns that meet the following criteria:
-
-- the first column is numeric `(*Int*, Date, DateTime)` or a string `(String, FixedString)`, let's call it `key`,
-- the other columns are arithmetic `(*Int*, Float32/64)`, let's call it `(values...)`,
-
-then this nested table is interpreted as a mapping of `key => (values...)`, and when merging its rows, the elements of two data sets are merged by `key` with a summation of the corresponding `(values...)`.
-
-Examples:
-
-``` text
-[(1, 100)] + [(2, 150)] -> [(1, 100), (2, 150)]
-[(1, 100)] + [(1, 150)] -> [(1, 250)]
-[(1, 100)] + [(1, 150), (2, 150)] -> [(1, 250), (2, 150)]
-[(1, 100), (2, 150)] + [(1, -100)] -> [(2, 150)]
-```
-
-When requesting data, use the [sumMap(key, value)](../../../sql-reference/aggregate-functions/reference.md) function for aggregation of `Map`.
-
-For a nested data structure, you do not need to specify its columns in the tuple of columns for summation.
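-
-A minimal sketch of such a nested `Map` structure (the table and data are illustrative, not from the original text):
-
-``` sql
-CREATE TABLE summap_demo
-(
-    day Date,
-    StatsMap Nested
-    (
-        key   UInt32,
-        value UInt64
-    )
-)
-ENGINE = SummingMergeTree()
-ORDER BY day;
-
-INSERT INTO summap_demo VALUES ('2021-05-01', [1], [100]);
-INSERT INTO summap_demo VALUES ('2021-05-01', [1, 2], [150, 150]);
-
--- Elements are merged by key and the values are summed: [(1,250),(2,150)]
-SELECT sumMap(StatsMap.key, StatsMap.value) FROM summap_demo GROUP BY day;
-```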
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/summingmergetree/)
diff --git a/docs/es/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md b/docs/es/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md
deleted file mode 100644
index d69bfe9440e..00000000000
--- a/docs/es/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md
+++ /dev/null
@@ -1,238 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 37
-toc_title: VersionedCollapsingMergeTree
----
-
-# VersionedCollapsingMergeTree {#versionedcollapsingmergetree}
-
-This engine:
-
-- Allows quick writing of object states that are continually changing.
-- Deletes old object states in the background. This significantly reduces the volume of storage.
-
-See the section [Collapsing](#table_engines_versionedcollapsingmergetree) for details.
-
-The engine inherits from [MergeTree](mergetree.md#table_engines-mergetree) and adds the logic for collapsing rows to the algorithm for merging data parts. `VersionedCollapsingMergeTree` serves the same purpose as [CollapsingMergeTree](collapsingmergetree.md) but uses a different collapsing algorithm that allows inserting the data in any order with multiple threads. In particular, the `Version` column helps to collapse the rows properly even if they are inserted in the wrong order. In contrast, `CollapsingMergeTree` allows only strictly consecutive insertion.
-
-## Creating a Table {#creating-a-table}
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-) ENGINE = VersionedCollapsingMergeTree(sign, version)
-[PARTITION BY expr]
-[ORDER BY expr]
-[SAMPLE BY expr]
-[SETTINGS name=value, ...]
-```
-
-For a description of query parameters, see the [query description](../../../sql-reference/statements/create.md).
-
-**Engine Parameters**
-
-``` sql
-VersionedCollapsingMergeTree(sign, version)
-```
-
-- `sign` — Name of the column with the type of row: `1` is a "state" row, `-1` is a "cancel" row.
-
-    The column data type should be `Int8`.
-
-- `version` — Name of the column with the version of the object state.
-
-    The column data type should be `UInt*`.
-
-**Query Clauses**
-
-When creating a `VersionedCollapsingMergeTree` table, the same [clauses](mergetree.md) are required as when creating a `MergeTree` table.
-
-<details markdown="1">
-
-<summary>Deprecated Method for Creating a Table</summary>
-
-!!! attention "Attention"
    Do not use this method in new projects. If possible, switch the old projects to the method described above.
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-) ENGINE [=] VersionedCollapsingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, sign, version)
-```
-
-All of the parameters except `sign` and `version` have the same meaning as in `MergeTree`.
-
-- `sign` — Name of the column with the type of row: `1` is a "state" row, `-1` is a "cancel" row.
-
-    Column Data Type — `Int8`.
-
-- `version` — Name of the column with the version of the object state.
-
-    The column data type should be `UInt*`.
-
-</details>
-
-## Collapsing {#table_engines_versionedcollapsingmergetree}
-
-### Data {#data}
-
-Consider a situation where you need to save continually changing data for some object. It is reasonable to have one row for an object and update the row whenever there are changes. However, the update operation is expensive and slow for a DBMS, because it requires rewriting the data in the storage. Update is not acceptable if you need to write data quickly, but you can write the changes to an object sequentially as follows.
-
-Use the `Sign` column when writing the row. If `Sign = 1` it means that the row is a state of an object (let's call it the "state" row). If `Sign = -1` it indicates the cancellation of the state of an object with the same attributes (let's call it the "cancel" row). Also use the `Version` column, which should identify each state of an object with a separate number.
-
-For example, we want to calculate how many pages users visited on some site and how long they were there. At some point in time we write the following row with the state of user activity:
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
-│ 4324182021466249494 │         5 │      146 │    1 │       1 │
-└─────────────────────┴───────────┴──────────┴──────┴─────────┘
-```
-
-At some point later we register the change of user activity and write it with the following two rows.
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
-│ 4324182021466249494 │         5 │      146 │   -1 │       1 │
-│ 4324182021466249494 │         6 │      185 │    1 │       2 │
-└─────────────────────┴───────────┴──────────┴──────┴─────────┘
-```
-
-The first row cancels the previous state of the object (user). It should copy all of the fields of the canceled state except `Sign`.
-
-The second row contains the current state.
-
-Because we need only the last state of user activity, the rows
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
-│ 4324182021466249494 │         5 │      146 │    1 │       1 │
-│ 4324182021466249494 │         5 │      146 │   -1 │       1 │
-└─────────────────────┴───────────┴──────────┴──────┴─────────┘
-```
-
-can be deleted, collapsing the invalid (old) state of the object. `VersionedCollapsingMergeTree` does this while merging the data parts.
-
-To find out why we need two rows for each change, see [Algorithm](#table_engines-versionedcollapsingmergetree-algorithm).
-
-**Notes on Usage**
-
-1. The program that writes the data should remember the state of an object in order to cancel it. The "cancel" row should be a copy of the "state" row with the opposite `Sign`. This increases the initial size of storage but allows writing the data quickly.
-2. Long growing arrays in columns reduce the efficiency of the engine due to the load for writing. The more straightforward the data, the better the efficiency.
-3. `SELECT` results depend strongly on the consistency of the history of object changes. Be accurate when preparing data for inserting. You can get unpredictable results with inconsistent data, such as negative values for non-negative metrics like session depth.
-
-### Algorithm {#table_engines-versionedcollapsingmergetree-algorithm}
-
-When ClickHouse merges data parts, it deletes each pair of rows that have the same primary key and version and a different `Sign`. The order of rows does not matter.
-
-When ClickHouse inserts data, it orders rows by the primary key. If the `Version` column is not in the primary key, ClickHouse adds it to the primary key implicitly as the last field and uses it for ordering.
-
-## Selecting Data {#selecting-data}
-
-ClickHouse does not guarantee that all of the rows with the same primary key will be in the same resulting data part or even on the same physical server. This is true both for writing the data and for the subsequent merging of the data parts. In addition, ClickHouse processes `SELECT` queries with multiple threads, and it cannot predict the order of rows in the result. This means that aggregation is required if there is a need to get completely "collapsed" data from a `VersionedCollapsingMergeTree` table.
-
-To finalize collapsing, write a query with a `GROUP BY` clause and aggregate functions that account for the sign. For example, to calculate quantity, use `sum(Sign)` instead of `count()`. To calculate the sum of something, use `sum(Sign * x)` instead of `sum(x)`, and add `HAVING sum(Sign) > 0`.
-
-The aggregates `count`, `sum` and `avg` can be calculated this way. The aggregate `uniq` can be calculated if an object has at least one non-collapsed state. The aggregates `min` and `max` can't be calculated, because `VersionedCollapsingMergeTree` does not save the history of values of collapsed states.
-
-If you need to extract the data with "collapsing" but without aggregation (for example, to check whether rows are present whose newest values match certain conditions), you can use the `FINAL` modifier for the `FROM` clause. This approach is inefficient and should not be used with large tables.
-
-## Example of Use {#example-of-use}
-
-Example data:
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
-│ 4324182021466249494 │         5 │      146 │    1 │       1 │
-│ 4324182021466249494 │         5 │      146 │   -1 │       1 │
-│ 4324182021466249494 │         6 │      185 │    1 │       2 │
-└─────────────────────┴───────────┴──────────┴──────┴─────────┘
-```
-
-Creating the table:
-
-``` sql
-CREATE TABLE UAct
-(
-    UserID UInt64,
-    PageViews UInt8,
-    Duration UInt8,
-    Sign Int8,
-    Version UInt8
-)
-ENGINE = VersionedCollapsingMergeTree(Sign, Version)
-ORDER BY UserID
-```
-
-Inserting the data:
-
-``` sql
-INSERT INTO UAct VALUES (4324182021466249494, 5, 146, 1, 1)
-```
-
-``` sql
-INSERT INTO UAct VALUES (4324182021466249494, 5, 146, -1, 1),(4324182021466249494, 6, 185, 1, 2)
-```
-
-We use two `INSERT` queries to create two different data parts. If we insert the data with a single query, ClickHouse creates one data part and will never perform any merge.
-
-Getting the data:
-
-``` sql
-SELECT * FROM UAct
-```
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
-│ 4324182021466249494 │         5 │      146 │    1 │       1 │
-└─────────────────────┴───────────┴──────────┴──────┴─────────┘
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
-│ 4324182021466249494 │         5 │      146 │   -1 │       1 │
-│ 4324182021466249494 │         6 │      185 │    1 │       2 │
-└─────────────────────┴───────────┴──────────┴──────┴─────────┘
-```
-
-What do we see here, and where are the collapsed parts?
-We created two data parts using two `INSERT` queries. The `SELECT` query was performed in two threads, and the result is a random order of rows.
-Collapsing did not occur because the data parts have not been merged yet.
-
-ClickHouse merges data parts at an unknown point in time which we cannot predict.
-
-This is why we need aggregation:
-
-``` sql
-SELECT
-    UserID,
-    sum(PageViews * Sign) AS PageViews,
-    sum(Duration * Sign) AS Duration,
-    Version
-FROM UAct
-GROUP BY UserID, Version
-HAVING sum(Sign) > 0
-```
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Version─┐
-│ 4324182021466249494 │         6 │      185 │       2 │
-└─────────────────────┴───────────┴──────────┴─────────┘
-```
-
-If we don't need aggregation and want to force collapsing, we can use the `FINAL` modifier for the `FROM` clause.
-
-``` sql
-SELECT * FROM UAct FINAL
-```
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
-│ 4324182021466249494 │         6 │      185 │    1 │       2 │
-└─────────────────────┴───────────┴──────────┴──────┴─────────┘
-```
-
-This is a very inefficient way to select data. Don't use it for large tables.
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/versionedcollapsingmergetree/)
diff --git a/docs/es/engines/table-engines/special/buffer.md b/docs/es/engines/table-engines/special/buffer.md
deleted file mode 100644
index b3a26ff356a..00000000000
--- a/docs/es/engines/table-engines/special/buffer.md
+++ /dev/null
@@ -1,71 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 45
-toc_title: "Buffer"
----
-
-# Buffer {#buffer}
-
-Buffers the data to write in RAM, periodically flushing it to another table. During the read operation, data is read from the buffer and the other table simultaneously.
-
-``` sql
-Buffer(database, table, num_layers, min_time, max_time, min_rows, max_rows, min_bytes, max_bytes)
-```
-
-Engine parameters:
-
-- `database` – Database name. Instead of the database name, you can use a constant expression that returns a string.
-- `table` – Table to flush data to.
-- `num_layers` – Parallelism layer. Physically, the table will be represented as `num_layers` independent buffers. Recommended value: 16.
-- `min_time`, `max_time`, `min_rows`, `max_rows`, `min_bytes`, and `max_bytes` – Conditions for flushing data from the buffer.
-
-Data is flushed from the buffer and written to the destination table if all the `min*` conditions or at least one `max*` condition are met.
-
-- `min_time`, `max_time` – Condition for the time in seconds from the moment of the first write to the buffer.
-- `min_rows`, `max_rows` – Condition for the number of rows in the buffer.
-- `min_bytes`, `max_bytes` – Condition for the number of bytes in the buffer.
-
-During the write operation, data is inserted into a `num_layers` number of random buffers. Or, if the data part to insert is large enough (greater than `max_rows` or `max_bytes`), it is written directly to the destination table, omitting the buffer.
-
-The conditions for flushing the data are calculated separately for each of the `num_layers` buffers. For example, if `num_layers = 16` and `max_bytes = 100000000`, the maximum RAM consumption is 1.6 GB.
-
-Example:
-
-``` sql
-CREATE TABLE merge.hits_buffer AS merge.hits ENGINE = Buffer(merge, hits, 16, 10, 100, 10000, 1000000, 10000000, 100000000)
-```
-
-This creates a 'merge.hits_buffer' table with the same structure as 'merge.hits', using the Buffer engine. When writing to this table, data is buffered in RAM and later written to the 'merge.hits' table. 16 buffers are created.
-
-The data in each of them is flushed if either 100 seconds have passed, or one million rows have been written, or 100 MB of data have been written; or if simultaneously 10 seconds have passed and 10,000 rows and 10 MB of data have been written. For example, if just one row has been written, after 100 seconds it will be flushed, no matter what. But if many rows have been written, the data will be flushed sooner.
-
-When the server is stopped, with DROP TABLE or DETACH TABLE, buffered data is also flushed to the destination table.
-
-You can set empty strings in single quotation marks for the database and the table name. This indicates the absence of a destination table. In this case, when the data flush conditions are reached, the buffer is simply cleared. This may be useful for keeping a window of data in memory.
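-
-A minimal sketch of such an in-memory window (the table and the flush thresholds are illustrative):
-
-``` sql
--- No destination table: when a flush condition is reached, the buffer is simply cleared
-CREATE TABLE last_minute_events
-(
-    ts  DateTime,
-    msg String
-) ENGINE = Buffer('', '', 1, 10, 60, 1000, 100000, 1000000, 10000000)
-```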
-
-When reading from a Buffer table, data is processed both from the buffer and from the destination table (if there is one).
-Note that Buffer tables do not support an index. In other words, data in the buffer is fully scanned, which might be slow for large buffers. (For data in a subordinate table, the index that it supports will be used.)
-
-If the set of columns in the Buffer table doesn't match the set of columns in a subordinate table, a subset of columns that exist in both tables is inserted.
-
-If the types don't match for one of the columns in the Buffer table and a subordinate table, an error message is entered in the server log and the buffer is cleared.
-The same thing happens if the subordinate table doesn't exist when the buffer is flushed.
-
-If you need to run ALTER for a subordinate table and the Buffer table, we recommend first deleting the Buffer table, running ALTER for the subordinate table, then creating the Buffer table again.
-
-If the server is restarted abnormally, the data in the buffer is lost.
-
-FINAL and SAMPLE do not work correctly for Buffer tables. These conditions are passed to the destination table but are not used for processing data in the buffer. If these features are required, we recommend only using the Buffer table for writing, while reading from the destination table.
-
-When adding data to a Buffer, one of the buffers is locked. This causes delays if a read operation is simultaneously being performed from the table.
-
-Data that is inserted into a Buffer table may end up in the subordinate table in a different order and in different blocks. Because of this, a Buffer table is difficult to use for writing to a CollapsingMergeTree correctly. To avoid problems, you can set 'num_layers' to 1.
-
-If the destination table is replicated, some expected characteristics of replicated tables are lost when writing to a Buffer table. The random changes to the order of rows and sizes of data parts cause data deduplication to quit working, which means it is not possible to have a reliable 'exactly once' write to replicated tables.
-
-Due to these disadvantages, we can only recommend using a Buffer table in rare cases.
-
-A Buffer table is used when too many INSERTs are received from a large number of servers over a unit of time, and the data can't be buffered before insertion, which means the INSERTs can't run fast enough.
-
-Note that it doesn't make sense to insert data one row at a time, even for Buffer tables. This will only produce a speed of a few thousand rows per second, while inserting larger blocks of data can produce over a million rows per second (see the section "Performance").
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/buffer/)
diff --git a/docs/es/engines/table-engines/special/dictionary.md b/docs/es/engines/table-engines/special/dictionary.md
deleted file mode 100644
index 6d9136a6a23..00000000000
--- a/docs/es/engines/table-engines/special/dictionary.md
+++ /dev/null
@@ -1,97 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 35
-toc_title: Dictionary
----
-
-# Dictionary {#dictionary}
-
-The `Dictionary` engine displays the [dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) data as a ClickHouse table.
-
-As an example, consider a dictionary of `products` with the following configuration:
-
-``` xml
-<dictionaries>
-    <dictionary>
-        <name>products</name>
-        <source>
-            <odbc>
-                <table>products</table>
-                <connection_string>DSN=some-db-server</connection_string>
-            </odbc>
-        </source>
-        <lifetime>
-            <min>300</min>
-            <max>360</max>
-        </lifetime>
-        <layout>
-            <flat/>
-        </layout>
-        <structure>
-            <id>
-                <name>product_id</name>
-            </id>
-            <attribute>
-                <name>title</name>
-                <type>String</type>
-                <null_value></null_value>
-            </attribute>
-        </structure>
-    </dictionary>
-</dictionaries>
-```
-
-Query the dictionary data:
-
-``` sql
-SELECT
-    name,
-    type,
-    key,
-    attribute.names,
-    attribute.types,
-    bytes_allocated,
-    element_count,
-    source
-FROM system.dictionaries
-WHERE name = 'products'
-```
-
-``` text
-┌─name─────┬─type─┬─key────┬─attribute.names─┬─attribute.types─┬─bytes_allocated─┬─element_count─┬─source──────────┐
-│ products │ Flat │ UInt64 │ ['title']       │ ['String']      │        23065376 │        175032 │ ODBC: .products │
-└──────────┴──────┴────────┴─────────────────┴─────────────────┴─────────────────┴───────────────┴─────────────────┘
-```
-
-You can use the [dictGet\*](../../../sql-reference/functions/ext-dict-functions.md#ext_dict_functions) functions to get the dictionary data in this format.
-
-This view isn't helpful when you need to get raw data, or when performing a `JOIN` operation. For these cases, you can use the `Dictionary` engine, which displays the dictionary data in a table.
-
-Syntax:
-
-``` sql
-CREATE TABLE %table_name% (%fields%) engine = Dictionary(%dictionary_name%)
-```
-
-Usage example:
-
-``` sql
-create table products (product_id UInt64, title String) Engine = Dictionary(products);
-```
-
-    Ok
-
-Take a look at what's in the table.
-
-``` sql
-select * from products limit 1;
-```
-
-``` text
-┌────product_id─┬─title───────────┐
-│        152689 │ Some item       │
-└───────────────┴─────────────────┘
-```
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/dictionary/)
diff --git a/docs/es/engines/table-engines/special/distributed.md b/docs/es/engines/table-engines/special/distributed.md
deleted file mode 100644
index bac407a651a..00000000000
--- a/docs/es/engines/table-engines/special/distributed.md
+++ /dev/null
@@ -1,152 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 33
-toc_title: Distributed
----
-
-# Distributed {#distributed}
-
-**Tables with the Distributed engine do not store any data by themselves**, but they allow distributed query processing on multiple servers.
-Reading is automatically parallelized. During a read, the table indexes on remote servers are used, if there are any.
-
-The Distributed engine accepts parameters:
-
-- the cluster name in the server's config file
-
-- the name of a remote database
-
-- the name of a remote table
-
-- (optionally) a sharding key
-
-- (optionally) a policy name, which will be used to store temporary files for asynchronous send
-
-    See also:
-
-    - the `insert_distributed_sync` setting
-    - [MergeTree](../mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes) for the examples
-
-Example:
-
-``` sql
-Distributed(logs, default, hits[, sharding_key[, policy_name]])
-```
-
-Data will be read from all servers in the 'logs' cluster, from the default.hits table located on every server in the cluster.
-Data is not only read but also partially processed on the remote servers (to the extent that this is possible).
-For example, for a query with GROUP BY, data will be aggregated on remote servers, and the intermediate states of aggregate functions will be sent to the requestor server. Then the data will be further aggregated.
-
-Instead of the database name, you can use a constant expression that returns a string. For example: currentDatabase().
Por ejemplo: currentDatabase(). - -logs – The cluster name in the server's config file. - -Los clústeres se establecen así: - -``` xml - - - - - 1 - - false - - example01-01-1 - 9000 - - - example01-01-2 - 9000 - - - - 2 - false - - example01-02-1 - 9000 - - - example01-02-2 - 1 - 9440 - - - - -``` - -Aquí se define un clúster con el nombre ‘logs’ que consta de dos fragmentos, cada uno de los cuales contiene dos réplicas. -Los fragmentos se refieren a los servidores que contienen diferentes partes de los datos (para leer todos los datos, debe acceder a todos los fragmentos). -Las réplicas están duplicando servidores (para leer todos los datos, puede acceder a los datos en cualquiera de las réplicas). - -Los nombres de clúster no deben contener puntos. - -Los parámetros `host`, `port`, y opcionalmente `user`, `password`, `secure`, `compression` se especifican para cada servidor: -- `host` – The address of the remote server. You can use either the domain or the IPv4 or IPv6 address. If you specify the domain, the server makes a DNS request when it starts, and the result is stored as long as the server is running. If the DNS request fails, the server doesn't start. If you change the DNS record, restart the server. -- `port` – The TCP port for messenger activity (‘tcp_port’ en la configuración, generalmente establecido en 9000). No lo confundas con http_port. -- `user` – Name of the user for connecting to a remote server. Default value: default. This user must have access to connect to the specified server. Access is configured in the users.xml file. For more information, see the section [Derechos de acceso](../../../operations/access-rights.md). -- `password` – The password for connecting to a remote server (not masked). Default value: empty string. -- `secure` - Use ssl para la conexión, por lo general también debe definir `port` = 9440. El servidor debe escuchar en `9440` y tener certificados correctos. -- `compression` - Utilice la compresión de datos. Valor predeterminado: true. - -When specifying replicas, one of the available replicas will be selected for each of the shards when reading. You can configure the algorithm for load balancing (the preference for which replica to access) – see the [load_balancing](../../../operations/settings/settings.md#settings-load_balancing) configuración. -Si no se establece la conexión con el servidor, habrá un intento de conectarse con un breve tiempo de espera. Si la conexión falla, se seleccionará la siguiente réplica, y así sucesivamente para todas las réplicas. Si el intento de conexión falló para todas las réplicas, el intento se repetirá de la misma manera, varias veces. -Esto funciona a favor de la resiliencia, pero no proporciona una tolerancia completa a errores: un servidor remoto podría aceptar la conexión, pero podría no funcionar o funcionar mal. - -Puede especificar solo uno de los fragmentos (en este caso, el procesamiento de consultas debe denominarse remoto, en lugar de distribuido) o hasta cualquier número de fragmentos. En cada fragmento, puede especificar entre una y cualquier número de réplicas. Puede especificar un número diferente de réplicas para cada fragmento. - -Puede especificar tantos clústeres como desee en la configuración. - -Para ver los clústeres, utilice el ‘system.clusters’ tabla. - -El motor distribuido permite trabajar con un clúster como un servidor local. 
Sin embargo, el clúster es inextensible: debe escribir su configuración en el archivo de configuración del servidor (mejor aún, para todos los servidores del clúster). - -The Distributed engine requires writing clusters to the config file. Clusters from the config file are updated on the fly, without restarting the server. If you need to send a query to an unknown set of shards and replicas each time, you don't need to create a Distributed table – use the ‘remote’ función de tabla en su lugar. Vea la sección [Funciones de tabla](../../../sql-reference/table-functions/index.md). - -Hay dos métodos para escribir datos en un clúster: - -Primero, puede definir a qué servidores escribir en qué datos y realizar la escritura directamente en cada fragmento. En otras palabras, realice INSERT en las tablas que la tabla distribuida “looks at”. Esta es la solución más flexible, ya que puede usar cualquier esquema de fragmentación, que podría ser no trivial debido a los requisitos del área temática. Esta es también la solución más óptima ya que los datos se pueden escribir en diferentes fragmentos de forma completamente independiente. - -En segundo lugar, puede realizar INSERT en una tabla distribuida. En este caso, la tabla distribuirá los datos insertados a través de los propios servidores. Para escribir en una tabla distribuida, debe tener un conjunto de claves de fragmentación (el último parámetro). Además, si solo hay un fragmento, la operación de escritura funciona sin especificar la clave de fragmentación, ya que no significa nada en este caso. - -Cada fragmento puede tener un peso definido en el archivo de configuración. Por defecto, el peso es igual a uno. Los datos se distribuyen entre fragmentos en la cantidad proporcional al peso del fragmento. Por ejemplo, si hay dos fragmentos y el primero tiene un peso de 9 mientras que el segundo tiene un peso de 10, el primero se enviará 9 / 19 partes de las filas, y el segundo se enviará 10 / 19. - -Cada fragmento puede tener el ‘internal_replication’ parámetro definido en el archivo de configuración. - -Si este parámetro se establece en ‘true’, la operación de escritura selecciona la primera réplica en buen estado y escribe datos en ella. Utilice esta alternativa si la tabla Distribuida “looks at” tablas replicadas. En otras palabras, si la tabla donde se escribirán los datos los replicará por sí misma. - -Si se establece en ‘false’ (el valor predeterminado), los datos se escriben en todas las réplicas. En esencia, esto significa que la tabla distribuida replica los datos en sí. Esto es peor que usar tablas replicadas, porque no se verifica la consistencia de las réplicas y, con el tiempo, contendrán datos ligeramente diferentes. - -Para seleccionar el fragmento al que se envía una fila de datos, se analiza la expresión de fragmentación y su resto se toma de dividirlo por el peso total de los fragmentos. La fila se envía al fragmento que corresponde al medio intervalo de los restos de ‘prev_weight’ a ‘prev_weights + weight’, donde ‘prev_weights’ es el peso total de los fragmentos con el número más pequeño, y ‘weight’ es el peso de este fragmento. Por ejemplo, si hay dos fragmentos, y el primero tiene un peso de 9 mientras que el segundo tiene un peso de 10, la fila se enviará al primer fragmento para los restos del rango \[0, 9), y al segundo para los restos del rango \[9, 19). - -La expresión de fragmentación puede ser cualquier expresión de constantes y columnas de tabla que devuelva un entero. 
Por ejemplo, puede usar la expresión ‘rand()’ para la distribución aleatoria de los datos, o ‘UserID’ para la distribución por el resto de dividir el ID del usuario (así, los datos de un solo usuario residirán en un solo fragmento, lo que simplifica la ejecución de IN y JOIN por usuarios). Si una de las columnas no se distribuye con suficiente uniformidad, puede envolverla en una función hash: intHash64(UserID).
-
-Un simple resto de la división es una solución limitada para el sharding y no siempre es apropiada. Funciona para volúmenes medianos y grandes de datos (docenas de servidores), pero no para volúmenes muy grandes de datos (cientos de servidores o más). En este último caso, use el esquema de fragmentación requerido por el área temática, en lugar de usar entradas en tablas distribuidas.
-
-SELECT queries are sent to all the shards and work regardless of how data is distributed across the shards (they can be distributed completely randomly). When you add a new shard, you don't have to transfer the old data to it. You can write new data with a heavier weight – the data will be distributed slightly unevenly, but queries will work correctly and efficiently.
-
-Debería preocuparse por el esquema de fragmentación en los siguientes casos:
-
--   Se utilizan consultas que requieren unir datos (IN o JOIN) mediante una clave específica. Si los datos están fragmentados por esta clave, puede usar IN o JOIN locales en lugar de GLOBAL IN o GLOBAL JOIN, lo que es mucho más eficiente.
--   Se usa una gran cantidad de servidores (cientos o más) con una gran cantidad de consultas pequeñas (consultas de clientes individuales: sitios web, anunciantes o socios). Para que las consultas pequeñas no afecten a todo el clúster, tiene sentido ubicar los datos de un solo cliente en un solo fragmento. Alternativamente, como lo hemos hecho en Yandex.Metrica, puede configurar un sharding de dos niveles: divida todo el clúster en “layers”, donde una capa puede consistir en varios fragmentos. Los datos de un único cliente se encuentran en una sola capa, pero se pueden agregar fragmentos a una capa según sea necesario y los datos se distribuyen aleatoriamente dentro de ellos. Se crean tablas distribuidas para cada capa y se crea una única tabla distribuida compartida para las consultas globales.
-
-Los datos se escriben de forma asíncrona. Cuando se inserta en la tabla, el bloque de datos simplemente se escribe en el sistema de archivos local. Los datos se envían a los servidores remotos en segundo plano tan pronto como sea posible. El período de envío de datos está gestionado por las configuraciones [distributed_directory_monitor_sleep_time_ms](../../../operations/settings/settings.md#distributed_directory_monitor_sleep_time_ms) y [distributed_directory_monitor_max_sleep_time_ms](../../../operations/settings/settings.md#distributed_directory_monitor_max_sleep_time_ms). El motor `Distributed` envía cada archivo con datos insertados por separado, pero puede habilitar el envío de archivos por lotes con la configuración [distributed_directory_monitor_batch_inserts](../../../operations/settings/settings.md#distributed_directory_monitor_batch_inserts). Esta configuración mejora el rendimiento del clúster al utilizar mejor los recursos del servidor local y de la red. Debe comprobar si los datos se envían correctamente revisando la lista de archivos (datos en espera de ser enviados) en el directorio de la tabla: `/var/lib/clickhouse/data/database/table/`.
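-
-A modo de ilustración, un boceto mínimo para revisar esos archivos pendientes desde la consola (se asume una tabla hipotética `default.hits_distributed` y la ruta de datos por defecto; ajuste ambas a su instalación):
-
-``` bash
-# Cada subdirectorio corresponde a un destino (shard/réplica);
-# los archivos .bin son bloques insertados que aún esperan ser enviados.
-ls -lR /var/lib/clickhouse/data/default/hits_distributed/
-```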
-
-Si el servidor dejó de existir o tuvo un reinicio abrupto (por ejemplo, después de un fallo de dispositivo) después de un INSERT en una tabla distribuida, es posible que se pierdan los datos insertados. Si se detecta un elemento de datos dañado en el directorio de la tabla, se transfiere al subdirectorio ‘broken’ y ya no se utiliza.
-
-Cuando la opción max_parallel_replicas está habilitada, el procesamiento de la consulta se paraleliza en todas las réplicas dentro de un solo fragmento. Para obtener más información, consulte la sección [max_parallel_replicas](../../../operations/settings/settings.md#settings-max_parallel_replicas).
-
-## Columnas virtuales {#virtual-columns}
-
--   `_shard_num` — Contains the `shard_num` (de `system.clusters`). Tipo: [UInt32](../../../sql-reference/data-types/int-uint.md).
-
-!!! note "Nota"
-    Dado que las funciones de tabla [`remote`](../../../sql-reference/table-functions/remote.md)/`cluster` crean internamente una instancia temporal del mismo motor Distributed, `_shard_num` también está disponible allí.
-
-**Ver también**
-
--   [Columnas virtuales](index.md#table_engines-virtual_columns)
-
-[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/distributed/)
diff --git a/docs/es/engines/table-engines/special/external-data.md b/docs/es/engines/table-engines/special/external-data.md
deleted file mode 100644
index f2ce4abbb0f..00000000000
--- a/docs/es/engines/table-engines/special/external-data.md
+++ /dev/null
@@ -1,68 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 34
-toc_title: Datos externos
----
-
-# Datos externos para el procesamiento de consultas {#external-data-for-query-processing}
-
-ClickHouse permite enviar a un servidor los datos necesarios para procesar una consulta, junto con una consulta SELECT. Estos datos se colocan en una tabla temporal (consulte la sección “Temporary tables”) y se pueden utilizar en la consulta (por ejemplo, en operadores IN).
-
-Por ejemplo, si tiene un archivo de texto con identificadores de usuario importantes, puede cargarlo en el servidor junto con una consulta que use el filtrado por esta lista.
-
-Si necesita ejecutar más de una consulta con un gran volumen de datos externos, no utilice esta función. Es mejor cargar los datos a la base de datos con anticipación.
-
-Los datos externos se pueden cargar mediante el cliente de línea de comandos (en modo no interactivo) o mediante la interfaz HTTP.
-
-En el cliente de línea de comandos, puede especificar una sección de parámetros en el formato
-
-``` bash
---external --file=... [--name=...] [--format=...] [--types=...|--structure=...]
-```
-
-Puede tener varias secciones como esta, una por cada tabla que se transmita.
-
-**–external** – Marks the beginning of a clause.
-**–file** – Path to the file with the table dump, or -, which refers to stdin.
-Solo se puede recuperar una sola tabla de stdin.
-
-Los siguientes parámetros son opcionales:
-**–name** – Name of the table. If omitted, _data is used.
-**–format** – Data format in the file. If omitted, TabSeparated is used.
-
-Se requiere uno de los siguientes parámetros:
-**–types** – A list of comma-separated column types. For example: `UInt64,String`. The columns will be named _1, _2, …
-**–structure** – The table structure in the format `UserID UInt64`, `URL String`. Define los nombres y tipos de columna (vea el boceto a continuación).
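-
-Un boceto mínimo de la diferencia entre `--types` y `--structure` (los datos y los nombres de columna son hipotéticos):
-
-``` bash
-# Con --types, la tabla se llama _data y las columnas _1, _2, ...
-echo -e "1\tabc" | clickhouse-client --query="SELECT _1, _2 FROM _data" --external --file=- --types='UInt64,String'
-
-# Con --structure, usted define los nombres y tipos de cada columna.
-echo -e "1\tabc" | clickhouse-client --query="SELECT id, s FROM _data" --external --file=- --structure='id UInt64, s String'
-```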
-
-Los archivos especificados en ‘file’ se analizarán mediante el formato especificado en ‘format’, utilizando los tipos de datos especificados en ‘types’ o ‘structure’. La tabla se cargará en el servidor y será accesible allí como una tabla temporal con el nombre ‘name’.
-
-Ejemplos:
-
-``` bash
-$ echo -ne "1\n2\n3\n" | clickhouse-client --query="SELECT count() FROM test.visits WHERE TraficSourceID IN _data" --external --file=- --types=Int8
-849897
-$ cat /etc/passwd | sed 's/:/\t/g' | clickhouse-client --query="SELECT shell, count() AS c FROM passwd GROUP BY shell ORDER BY c DESC" --external --file=- --name=passwd --structure='login String, unused String, uid UInt16, gid UInt16, comment String, home String, shell String'
-/bin/sh 20
-/bin/false 5
-/bin/bash 4
-/usr/sbin/nologin 1
-/bin/sync 1
-```
-
-Cuando se utiliza la interfaz HTTP, los datos externos se pasan en el formato multipart/form-data. Cada tabla se transmite como un archivo separado. El nombre de la tabla se toma del nombre del archivo. En la ‘query_string’ se pasan los parámetros ‘name_format’, ‘name_types’ y ‘name_structure’, donde ‘name’ es el nombre de la tabla a la que corresponden estos parámetros. El significado de los parámetros es el mismo que cuando se usa el cliente de línea de comandos.
-
-Ejemplo:
-
-``` bash
-$ cat /etc/passwd | sed 's/:/\t/g' > passwd.tsv
-
-$ curl -F 'passwd=@passwd.tsv;' 'http://localhost:8123/?query=SELECT+shell,+count()+AS+c+FROM+passwd+GROUP+BY+shell+ORDER+BY+c+DESC&passwd_structure=login+String,+unused+String,+uid+UInt16,+gid+UInt16,+comment+String,+home+String,+shell+String'
-/bin/sh 20
-/bin/false 5
-/bin/bash 4
-/usr/sbin/nologin 1
-/bin/sync 1
-```
-
-Para el procesamiento de consultas distribuidas, las tablas temporales se envían a todos los servidores remotos.
-
-[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/external_data/)
diff --git a/docs/es/engines/table-engines/special/file.md b/docs/es/engines/table-engines/special/file.md
deleted file mode 100644
index fb739506a22..00000000000
--- a/docs/es/engines/table-engines/special/file.md
+++ /dev/null
@@ -1,90 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 37
-toc_title: File
----
-
-# File {#table_engines-file}
-
-El motor de tabla File mantiene los datos en un archivo en uno de los [formatos de archivo](../../../interfaces/formats.md#formats) admitidos (TabSeparated, Native, etc.).
-
-Ejemplos de uso:
-
--   Exportación de datos de ClickHouse a un archivo.
--   Conversión de datos de un formato a otro.
--   Actualización de datos en ClickHouse mediante la edición de un archivo en el disco.
-
-## Uso en el servidor ClickHouse {#usage-in-clickhouse-server}
-
-``` sql
-File(Format)
-```
-
-El parámetro `Format` especifica uno de los formatos de archivo disponibles. Para realizar consultas `SELECT`, el formato debe ser compatible para la entrada, y para realizar consultas `INSERT`, para la salida. Los formatos disponibles se enumeran en el apartado [Formato](../../../interfaces/formats.md#formats).
-
-ClickHouse no permite especificar la ruta del sistema de archivos para `File`. Utilizará la carpeta definida por la configuración [path](../../../operations/server-configuration-parameters/settings.md) en la configuración del servidor.
-
-Al crear una tabla usando `File(Format)`, ClickHouse crea un subdirectorio vacío en esa carpeta. Cuando los datos se escriben en esa tabla, se colocan en `data.Format` en ese subdirectorio.
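-
-Por ejemplo, un boceto de dónde queda el archivo tras escribir en una tabla de este motor (se asume la base de datos `default`, una tabla hipotética `t` y la ruta de datos por defecto):
-
-``` bash
-# Después de:
-#   CREATE TABLE t (s String) ENGINE = File(TabSeparated)
-#   INSERT INTO t VALUES ('hello')
-# los datos quedan en:
-ls /var/lib/clickhouse/data/default/t/
-# data.TabSeparated
-```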
-
-Puede crear manualmente esta subcarpeta y este archivo en el sistema de archivos del servidor y luego adjuntarlos ([ATTACH](../../../sql-reference/statements/misc.md)) a la información de tabla con el nombre coincidente, para poder consultar los datos desde ese archivo.
-
-!!! warning "Advertencia"
-    Tenga cuidado con esta funcionalidad, ya que ClickHouse no realiza un seguimiento de los cambios externos en dichos archivos. El resultado de escrituras simultáneas a través de ClickHouse y fuera de ClickHouse no está definido.
-
-**Ejemplo:**
-
-**1.** Configure la tabla `file_engine_table`:
-
-``` sql
-CREATE TABLE file_engine_table (name String, value UInt32) ENGINE=File(TabSeparated)
-```
-
-Por defecto, ClickHouse creará la carpeta `/var/lib/clickhouse/data/default/file_engine_table`.
-
-**2.** Cree manualmente `/var/lib/clickhouse/data/default/file_engine_table/data.TabSeparated` con el contenido:
-
-``` bash
-$ cat data.TabSeparated
-one 1
-two 2
-```
-
-**3.** Consulte los datos:
-
-``` sql
-SELECT * FROM file_engine_table
-```
-
-``` text
-┌─name─┬─value─┐
-│ one  │     1 │
-│ two  │     2 │
-└──────┴───────┘
-```
-
-## Uso en clickhouse-local {#usage-in-clickhouse-local}
-
-En [clickhouse-local](../../../operations/utilities/clickhouse-local.md#clickhouse-local), el motor File acepta la ruta del archivo además de `Format`. Los flujos de entrada/salida predeterminados se pueden especificar utilizando nombres numéricos o legibles por humanos como `0` o `stdin`, `1` o `stdout`.
-**Ejemplo:**
-
-``` bash
-$ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table"
-```
-
-## Detalles de la implementación {#details-of-implementation}
-
--   Se pueden realizar varias consultas `SELECT` simultáneamente, pero las consultas `INSERT` se esperarán entre sí.
--   Se admite la creación de nuevos archivos mediante consultas `INSERT`.
--   Si el archivo existe, `INSERT` añadirá nuevos valores en él.
--   No soportado:
-    -   `ALTER`
-    -   `SELECT ... SAMPLE`
-    -   Índices
-    -   Replicación
-
-[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/file/)
diff --git a/docs/es/engines/table-engines/special/generate.md b/docs/es/engines/table-engines/special/generate.md
deleted file mode 100644
index 67e664284b4..00000000000
--- a/docs/es/engines/table-engines/special/generate.md
+++ /dev/null
@@ -1,61 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 46
-toc_title: GenerateRandom
----
-
-# GenerateRandom {#table_engines-generate}
-
-El motor de tabla GenerateRandom produce datos aleatorios para el esquema de tabla dado.
-
-Ejemplos de uso:
-
--   Poblar de forma reproducible una tabla grande en pruebas.
--   Generar entradas aleatorias para pruebas de fuzzing.
-
-## Uso en el servidor ClickHouse {#usage-in-clickhouse-server}
-
-``` sql
-ENGINE = GenerateRandom(random_seed, max_string_length, max_array_length)
-```
-
-Los parámetros `max_array_length` y `max_string_length` especifican la longitud máxima de todas las columnas de tipo matriz y de las cadenas, respectivamente, en los datos generados.
-
-El motor de tabla GenerateRandom sólo admite consultas `SELECT`.
-
-Es compatible con todos los [tipos de datos](../../../sql-reference/data-types/index.md) que se pueden almacenar en una tabla, excepto `LowCardinality` y `AggregateFunction`.
-
-**Ejemplo:**
-
-**1.** Configure la tabla `generate_engine_table`:
-
-``` sql
-CREATE TABLE generate_engine_table (name String, value UInt32) ENGINE = GenerateRandom(1, 5, 3)
-```
-
-**2.** Consulte los datos:
-
-``` sql
-SELECT * FROM generate_engine_table LIMIT 3
-```
-
-``` text
-┌─name─┬──────value─┐
-│ c4xJ │ 1412771199 │
-│ r    │ 1791099446 │
-│ 7#$  │  124312908 │
-└──────┴────────────┘
-```
-
-## Detalles de la implementación {#details-of-implementation}
-
--   No soportado:
-    -   `ALTER`
-    -   `SELECT ... SAMPLE`
-    -   `INSERT`
-    -   Índices
-    -   Replicación
-
-[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/generate/)
diff --git a/docs/es/engines/table-engines/special/index.md b/docs/es/engines/table-engines/special/index.md
deleted file mode 100644
index 9927a1f61d9..00000000000
--- a/docs/es/engines/table-engines/special/index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_folder_title: Especial
-toc_priority: 31
----
-
-
diff --git a/docs/es/engines/table-engines/special/join.md b/docs/es/engines/table-engines/special/join.md
deleted file mode 100644
index 83e21b7c8cc..00000000000
--- a/docs/es/engines/table-engines/special/join.md
+++ /dev/null
@@ -1,111 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 40
-toc_title: Unir
----
-
-# Unir {#join}
-
-Estructura de datos preparada para usarse en operaciones [JOIN](../../../sql-reference/statements/select/join.md#select-join).
-
-## Creación de una tabla {#creating-a-table}
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
-) ENGINE = Join(join_strictness, join_type, k1[, k2, ...])
-```
-
-Vea la descripción detallada de la consulta [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query).
-
-**Parámetros del motor**
-
--   `join_strictness` – [Rigurosidad del JOIN](../../../sql-reference/statements/select/join.md#select-join-types).
--   `join_type` – [Tipo de JOIN](../../../sql-reference/statements/select/join.md#select-join-types).
--   `k1[, k2, ...]` – Columnas clave de la cláusula `USING` con las que se realiza la operación `JOIN`.
-
-Introduzca los parámetros `join_strictness` y `join_type` sin comillas, por ejemplo, `Join(ANY, LEFT, col1)`. Deben coincidir con la operación `JOIN` para la que se utilizará la tabla. Si los parámetros no coinciden, ClickHouse no lanza una excepción y puede devolver datos incorrectos.
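-
-Como ilustración de este requisito de coincidencia, un boceto mínimo (la tabla `t` del lado izquierdo es hipotética):
-
-``` sql
--- La tabla se declara para ANY LEFT JOIN con la clave id ...
-CREATE TABLE j (id UInt32, val UInt32) ENGINE = Join(ANY, LEFT, id);
-
--- ... por lo que debe usarse con exactamente esa operación:
-SELECT * FROM t ANY LEFT JOIN j USING (id);
-```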
-
-## Uso de la tabla {#table-usage}
-
-### Ejemplo {#example}
-
-Creación de la tabla del lado izquierdo:
-
-``` sql
-CREATE TABLE id_val(`id` UInt32, `val` UInt32) ENGINE = TinyLog
-```
-
-``` sql
-INSERT INTO id_val VALUES (1,11)(2,12)(3,13)
-```
-
-Creación de la tabla `Join` del lado derecho:
-
-``` sql
-CREATE TABLE id_val_join(`id` UInt32, `val` UInt8) ENGINE = Join(ANY, LEFT, id)
-```
-
-``` sql
-INSERT INTO id_val_join VALUES (1,21)(1,22)(3,23)
-```
-
-Unión de las tablas:
-
-``` sql
-SELECT * FROM id_val ANY LEFT JOIN id_val_join USING (id) SETTINGS join_use_nulls = 1
-```
-
-``` text
-┌─id─┬─val─┬─id_val_join.val─┐
-│  1 │  11 │              21 │
-│  2 │  12 │            ᴺᵁᴸᴸ │
-│  3 │  13 │              23 │
-└────┴─────┴─────────────────┘
-```
-
-Como alternativa, puede recuperar datos de la tabla `Join` especificando el valor de la clave de unión:
-
-``` sql
-SELECT joinGet('id_val_join', 'val', toUInt32(1))
-```
-
-``` text
-┌─joinGet('id_val_join', 'val', toUInt32(1))─┐
-│                                         21 │
-└────────────────────────────────────────────┘
-```
-
-### Selección e inserción de datos {#selecting-and-inserting-data}
-
-Puede utilizar consultas `INSERT` para agregar datos a las tablas con motor `Join`. Si la tabla se creó con la rigurosidad `ANY`, se ignoran los datos de claves duplicadas. Con la rigurosidad `ALL`, se agregan todas las filas.
-
-No se puede realizar una consulta `SELECT` directamente sobre la tabla. En su lugar, use uno de los siguientes métodos:
-
--   Coloque la tabla en el lado derecho de una cláusula `JOIN`.
--   Llame a la función [joinGet](../../../sql-reference/functions/other-functions.md#joinget), que le permite extraer datos de la tabla de la misma manera que de un diccionario.
-
-### Limitaciones y ajustes {#join-limitations-and-settings}
-
-Al crear una tabla, se aplican los siguientes ajustes:
-
--   [join_use_nulls](../../../operations/settings/settings.md#join_use_nulls)
--   [max_rows_in_join](../../../operations/settings/query-complexity.md#settings-max_rows_in_join)
--   [max_bytes_in_join](../../../operations/settings/query-complexity.md#settings-max_bytes_in_join)
--   [join_overflow_mode](../../../operations/settings/query-complexity.md#settings-join_overflow_mode)
--   [join_any_take_last_row](../../../operations/settings/settings.md#settings-join_any_take_last_row)
-
-Las tablas con motor `Join` no se pueden usar en operaciones `GLOBAL JOIN`.
-
-El motor `Join` permite usar el ajuste [join_use_nulls](../../../operations/settings/settings.md#join_use_nulls) en la instrucción `CREATE TABLE`. La consulta [SELECT](../../../sql-reference/statements/select/index.md) también permite usar `join_use_nulls`. Si tiene configuraciones de `join_use_nulls` diferentes, puede obtener un error al unirse a la tabla; esto depende del tipo de JOIN. Cuando utilice la función [joinGet](../../../sql-reference/functions/other-functions.md#joinget), debe usar el mismo ajuste de `join_use_nulls` en las instrucciones `CREATE TABLE` y `SELECT`.
-
-## Almacenamiento de datos {#data-storage}
-
-Los datos de las tablas `Join` siempre se encuentran en la memoria RAM. Al insertar filas en una tabla, ClickHouse escribe bloques de datos en el directorio en el disco para que puedan restaurarse cuando se reinicie el servidor.
-
-Si el servidor se reinicia incorrectamente, el bloque de datos en el disco puede perderse o dañarse. En este caso, es posible que deba eliminar manualmente el archivo con los datos dañados.
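-
-Boceto ilustrativo de dónde se conservan esos bloques (se asume la base de datos `default`, la tabla `id_val_join` del ejemplo anterior y la ruta de datos por defecto):
-
-``` bash
-# Los bloques insertados se escriben aquí y se recargan en RAM al reiniciar el servidor.
-ls /var/lib/clickhouse/data/default/id_val_join/
-```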
-
-[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/join/)
diff --git a/docs/es/engines/table-engines/special/materializedview.md b/docs/es/engines/table-engines/special/materializedview.md
deleted file mode 100644
index 87e5218eb6a..00000000000
--- a/docs/es/engines/table-engines/special/materializedview.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 43
-toc_title: MaterializedView
----
-
-# MaterializedView {#materializedview}
-
-Se utiliza para implementar vistas materializadas (para obtener más información, consulte [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query)). Para almacenar los datos, utiliza otro motor, el que se especificó al crear la vista. Al leer desde la tabla, simplemente usa ese motor.
-
-[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/)
diff --git a/docs/es/engines/table-engines/special/memory.md b/docs/es/engines/table-engines/special/memory.md
deleted file mode 100644
index 3d4f8ddff54..00000000000
--- a/docs/es/engines/table-engines/special/memory.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 44
-toc_title: Memoria
----
-
-# Memoria {#memory}
-
-El motor Memory almacena los datos en RAM, sin comprimir. Los datos se almacenan exactamente en la misma forma en que se reciben al leerlos. En otras palabras, leer de esta tabla no tiene coste alguno.
-El acceso concurrente a los datos está sincronizado. Los bloqueos son cortos: las operaciones de lectura y escritura no se bloquean entre sí.
-Los índices no son compatibles. La lectura está paralelizada.
-El rendimiento máximo (más de 10 GB/s) se alcanza en consultas simples, porque no hay lectura de disco ni es necesario descomprimir o deserializar los datos. (Cabe señalar que, en muchos casos, el rendimiento del motor MergeTree es casi igual de alto.)
-Al reiniciar el servidor, los datos desaparecen de la tabla y la tabla queda vacía.
-Normalmente, el uso de este motor de tabla no está justificado. Sin embargo, se puede usar para pruebas y para tareas donde se requiere la velocidad máxima en un número relativamente pequeño de filas (hasta aproximadamente 100.000.000).
-
-El sistema utiliza el motor Memory para tablas temporales con datos de consulta externos (consulte la sección “External data for processing a query”) y para la implementación de GLOBAL IN (véase la sección “IN operators”).
-
-[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/memory/)
diff --git a/docs/es/engines/table-engines/special/merge.md b/docs/es/engines/table-engines/special/merge.md
deleted file mode 100644
index 6ed2c272914..00000000000
--- a/docs/es/engines/table-engines/special/merge.md
+++ /dev/null
@@ -1,70 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 36
-toc_title: Fusionar
----
-
-# Fusionar {#merge}
-
-El motor `Merge` (no debe confundirse con `MergeTree`) no almacena datos por sí mismo, pero permite leer de cualquier número de otras tablas simultáneamente.
-La lectura se paraleliza automáticamente. No se admite la escritura en la tabla. Al leer, se usan los índices de las tablas que realmente se están leyendo, si existen.
-El motor `Merge` acepta parámetros: el nombre de la base de datos y una expresión regular para las tablas.
-
-Ejemplo:
-
-``` sql
-Merge(hits, '^WatchLog')
-```
-
-Los datos se leerán de las tablas de la base de datos `hits` cuyos nombres coinciden con la expresión regular ‘`^WatchLog`’.
-
-En lugar del nombre de la base de datos, puede usar una expresión constante que devuelva una cadena. Por ejemplo, `currentDatabase()`.
-
-Expresiones regulares — [re2](https://github.com/google/re2) (soporta un subconjunto de PCRE), sensibles a mayúsculas y minúsculas.
-Vea las notas sobre los símbolos de escape en expresiones regulares en el apartado “match”.
-
-Al seleccionar las tablas para leer, la propia tabla `Merge` no se seleccionará, incluso si coincide con la expresión regular. Esto es para evitar bucles.
-Es posible crear dos tablas `Merge` que intentarán leer interminablemente los datos de la otra, pero no es una buena idea.
-
-La forma típica de usar el motor `Merge` es trabajar con un gran número de tablas `TinyLog` como si fueran una sola tabla.
-
-Ejemplo 2:
-
-Digamos que tiene una tabla antigua (WatchLog_old) y decidió cambiar el particionado sin mover los datos a una nueva tabla (WatchLog_new), y necesita ver los datos de ambas tablas.
-
-``` sql
-CREATE TABLE WatchLog_old(date Date, UserId Int64, EventType String, Cnt UInt64)
-ENGINE=MergeTree(date, (UserId, EventType), 8192);
-INSERT INTO WatchLog_old VALUES ('2018-01-01', 1, 'hit', 3);
-
-CREATE TABLE WatchLog_new(date Date, UserId Int64, EventType String, Cnt UInt64)
-ENGINE=MergeTree PARTITION BY date ORDER BY (UserId, EventType) SETTINGS index_granularity=8192;
-INSERT INTO WatchLog_new VALUES ('2018-01-02', 2, 'hit', 3);
-
-CREATE TABLE WatchLog as WatchLog_old ENGINE=Merge(currentDatabase(), '^WatchLog');
-
-SELECT *
-FROM WatchLog
-```
-
-``` text
-┌───────date─┬─UserId─┬─EventType─┬─Cnt─┐
-│ 2018-01-01 │      1 │ hit       │   3 │
-└────────────┴────────┴───────────┴─────┘
-┌───────date─┬─UserId─┬─EventType─┬─Cnt─┐
-│ 2018-01-02 │      2 │ hit       │   3 │
-└────────────┴────────┴───────────┴─────┘
-```
-
-## Columnas virtuales {#virtual-columns}
-
--   `_table` — Contains the name of the table from which data was read. Type: [String](../../../sql-reference/data-types/string.md).
-
-    Puede establecer condiciones constantes sobre `_table` en la cláusula `WHERE/PREWHERE` (por ejemplo, `WHERE _table='xyz'`). En este caso, la operación de lectura se realiza sólo para las tablas donde la condición sobre `_table` se satisface, por lo que la columna `_table` actúa como un índice.
-
-**Ver también**
-
--   [Columnas virtuales](index.md#table_engines-virtual_columns)
-
-[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/merge/)
diff --git a/docs/es/engines/table-engines/special/null.md b/docs/es/engines/table-engines/special/null.md
deleted file mode 100644
index cc05e7839c9..00000000000
--- a/docs/es/engines/table-engines/special/null.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 38
-toc_title: Nulo
----
-
-# Nulo {#null}
-
-Al escribir en una tabla Null, los datos se ignoran. Al leer desde una tabla Null, la respuesta está vacía.
-
-Sin embargo, puede crear una vista materializada sobre una tabla Null. Así, los datos escritos en la tabla terminarán en la vista.
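-
-Un boceto mínimo de este patrón (los nombres son hipotéticos):
-
-``` sql
-CREATE TABLE null_sink (x UInt64) ENGINE = Null;
-
--- La vista captura cada bloque insertado en null_sink,
--- aunque la propia tabla Null descarte los datos.
-CREATE MATERIALIZED VIEW x_sums ENGINE = Memory AS
-    SELECT sum(x) AS total FROM null_sink;
-
-INSERT INTO null_sink VALUES (1), (2), (3);
-
-SELECT * FROM null_sink; -- vacío
-SELECT * FROM x_sums;    -- una fila con total = 6
-```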
-
-[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/null/)
diff --git a/docs/es/engines/table-engines/special/set.md b/docs/es/engines/table-engines/special/set.md
deleted file mode 100644
index 4ff23202443..00000000000
--- a/docs/es/engines/table-engines/special/set.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 39
-toc_title: Establecer
----
-
-# Establecer {#set}
-
-Un conjunto de datos que siempre está en la memoria RAM. Está diseñado para usarse en el lado derecho del operador IN (consulte la sección “IN operators”).
-
-Puede usar INSERT para insertar datos en la tabla. Se agregarán nuevos elementos al conjunto de datos, mientras que los duplicados se ignorarán.
-Pero no puede realizar SELECT desde la tabla. La única forma de recuperar los datos es usándolos en la mitad derecha del operador IN.
-
-Los datos siempre se encuentran en la memoria RAM. Para INSERT, los bloques de datos insertados también se escriben en el directorio de la tabla en el disco. Al iniciar el servidor, estos datos se cargan en la RAM. En otras palabras, después de reiniciar, los datos permanecen en su lugar.
-
-Tras un reinicio abrupto del servidor, el bloque de datos en el disco puede perderse o dañarse. En este último caso, es posible que deba eliminar manualmente el archivo con los datos dañados.
-
-[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/set/)
diff --git a/docs/es/engines/table-engines/special/url.md b/docs/es/engines/table-engines/special/url.md
deleted file mode 100644
index 654b8e99a4e..00000000000
--- a/docs/es/engines/table-engines/special/url.md
+++ /dev/null
@@ -1,82 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 41
-toc_title: URL
----
-
-# URL(URL, Formato) {#table_engines-url}
-
-Administra datos en un servidor HTTP/HTTPS remoto. Este motor es similar al motor [File](file.md).
-
-## Uso del motor en el servidor ClickHouse {#using-the-engine-in-the-clickhouse-server}
-
-El `format` debe ser uno que ClickHouse pueda usar en consultas `SELECT` y, si es necesario, en consultas `INSERT`. Para obtener la lista completa de formatos admitidos, consulte [Formato](../../../interfaces/formats.md#formats).
-
-La `URL` debe ajustarse a la estructura de un localizador uniforme de recursos. La dirección URL especificada debe apuntar a un servidor que utilice HTTP o HTTPS. No se requiere ninguna cabecera adicional para obtener una respuesta del servidor.
-
-Las consultas `INSERT` y `SELECT` se transforman en peticiones `POST` y `GET`, respectivamente. Para procesar las peticiones `POST`, el servidor remoto debe admitir la [codificación de transferencia fragmentada](https://en.wikipedia.org/wiki/Chunked_transfer_encoding).
-
-Puede limitar el número máximo de saltos de redirección HTTP GET utilizando la configuración [max_http_get_redirects](../../../operations/settings/settings.md#setting-max_http_get_redirects).
-
**Ejemplo:**
-
-**1.** Cree una tabla `url_engine_table` en el servidor:
-
-``` sql
-CREATE TABLE url_engine_table (word String, value UInt64)
-ENGINE=URL('http://127.0.0.1:12345/', CSV)
-```
-
-**2.** Cree un servidor HTTP básico utilizando las herramientas estándar de Python 3 y arránquelo:
-
-``` python3
-from http.server import BaseHTTPRequestHandler, HTTPServer
-
-class CSVHTTPServer(BaseHTTPRequestHandler):
-    def do_GET(self):
-        self.send_response(200)
-        self.send_header('Content-type', 'text/csv')
-        self.end_headers()
-
-        self.wfile.write(bytes('Hello,1\nWorld,2\n', "utf-8"))
-
-if __name__ == "__main__":
-    server_address = ('127.0.0.1', 12345)
-    HTTPServer(server_address, CSVHTTPServer).serve_forever()
-```
-
-``` bash
-$ python3 server.py
-```
-
-**3.** Solicite los datos:
-
-``` sql
-SELECT * FROM url_engine_table
-```
-
-``` text
-┌─word──┬─value─┐
-│ Hello │     1 │
-│ World │     2 │
-└───────┴───────┘
-```
-
-## Detalles de la implementación {#details-of-implementation}
-
--   Las lecturas y escrituras pueden ser paralelas
--   No soportado:
-    -   Operaciones `ALTER` y `SELECT...SAMPLE`.
-    -   Índices.
-    -   Replicación.
-
-[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/url/)
diff --git a/docs/es/engines/table-engines/special/view.md b/docs/es/engines/table-engines/special/view.md
deleted file mode 100644
index dbb496bcca4..00000000000
--- a/docs/es/engines/table-engines/special/view.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 42
-toc_title: Vista
----
-
-# Vista {#table_engines-view}
-
-Se utiliza para implementar vistas (para obtener más información, consulte `CREATE VIEW query`). No almacena datos; solo almacena la consulta `SELECT` especificada. Al leer desde la tabla, ejecuta esta consulta (y elimina de la consulta todas las columnas innecesarias).
-
-[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/view/)
diff --git a/docs/es/faq/general.md b/docs/es/faq/general.md
deleted file mode 100644
index f8446e99152..00000000000
--- a/docs/es/faq/general.md
+++ /dev/null
@@ -1,60 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 78
-toc_title: Preguntas generales
----
-
-# Preguntas generales {#general-questions}
-
-## ¿Por qué no usar algo como MapReduce? {#why-not-use-something-like-mapreduce}
-
-Podemos referirnos a sistemas como MapReduce como sistemas de computación distribuida en los que la operación de reducción se basa en la ordenación distribuida. La solución de código abierto más común en esta clase es [Apache Hadoop](http://hadoop.apache.org). Yandex utiliza su solución interna, YT.
-
-Estos sistemas no son apropiados para consultas en línea debido a su alta latencia. En otras palabras, no se pueden usar como back-end para una interfaz web. Estos tipos de sistemas tampoco son útiles para actualizaciones de datos en tiempo real. La ordenación distribuida no es la mejor manera de realizar operaciones de reducción si el resultado de la operación y todos los resultados intermedios (si los hay) caben en la RAM de un único servidor, que suele ser el caso de las consultas en línea. En tal caso, una tabla hash es la forma óptima de realizar operaciones de reducción. Un enfoque común para optimizar las tareas map-reduce es la preagregación (reducción parcial) utilizando una tabla hash en RAM.
El usuario realiza esta optimización manualmente. La ordenación distribuida es una de las principales causas de rendimiento reducido cuando se ejecutan tareas map-reduce simples.
-
-La mayoría de las implementaciones de MapReduce le permiten ejecutar código arbitrario en un clúster. Pero un lenguaje de consulta declarativo es más adecuado para OLAP, para ejecutar experimentos rápidamente. Por ejemplo, Hadoop tiene Hive y Pig. También considere Cloudera Impala o Shark (obsoleto) para Spark, así como Spark SQL, Presto y Apache Drill. El rendimiento al ejecutar tales tareas es muy subóptimo en comparación con los sistemas especializados, y la latencia relativamente alta hace que sea poco realista utilizar estos sistemas como back-end para una interfaz web.
-
-## ¿Qué sucede si tengo un problema con las codificaciones al usar Oracle a través de ODBC? {#oracle-odbc-encodings}
-
-Si utiliza Oracle a través del controlador ODBC como fuente de diccionarios externos, debe establecer el valor de la variable de entorno `NLS_LANG` en `/etc/default/clickhouse`. Para obtener más información, consulte las [Preguntas frecuentes de Oracle NLS_LANG](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html).
-
-**Ejemplo**
-
-``` sql
-NLS_LANG=RUSSIAN_RUSSIA.UTF8
-```
-
-## ¿Cómo exporto datos de ClickHouse a un archivo? {#how-to-export-to-file}
-
-### Uso de la cláusula INTO OUTFILE {#using-into-outfile-clause}
-
-Añada una cláusula [INTO OUTFILE](../sql-reference/statements/select/into-outfile.md#into-outfile-clause) a su consulta.
-
-Por ejemplo:
-
-``` sql
-SELECT * FROM table INTO OUTFILE 'file'
-```
-
-De forma predeterminada, ClickHouse usa el formato [TabSeparated](../interfaces/formats.md#tabseparated) para los datos de salida. Para seleccionar otro [formato de datos](../interfaces/formats.md), utilice la [cláusula FORMAT](../sql-reference/statements/select/format.md#format-clause).
-
-Por ejemplo:
-
-``` sql
-SELECT * FROM table INTO OUTFILE 'file' FORMAT CSV
-```
-
-### Uso de una tabla con motor File {#using-a-file-engine-table}
-
-Ver [File](../engines/table-engines/special/file.md).
-
-### Uso de la redirección de línea de comandos {#using-command-line-redirection}
-
-``` bash
-$ clickhouse-client --query "SELECT * from table" --format FormatName > result.txt
-```
-
-Ver [clickhouse-client](../interfaces/cli.md).
-
-{## [Artículo Original](https://clickhouse.tech/docs/en/faq/general/) ##}
diff --git a/docs/es/faq/index.md b/docs/es/faq/index.md
deleted file mode 100644
index a44dbb31e89..00000000000
--- a/docs/es/faq/index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_folder_title: F.A.Q.
-toc_priority: 76
----
-
-
diff --git a/docs/es/getting-started/example-datasets/amplab-benchmark.md b/docs/es/getting-started/example-datasets/amplab-benchmark.md
deleted file mode 100644
index 066bf036266..00000000000
--- a/docs/es/getting-started/example-datasets/amplab-benchmark.md
+++ /dev/null
@@ -1,129 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 17
-toc_title: Referencia de Big Data de AMPLab
----
-
-# Referencia de Big Data de AMPLab {#amplab-big-data-benchmark}
-
-Ver https://amplab.cs.berkeley.edu/benchmark/
-
-Regístrese para obtener una cuenta gratuita en https://aws.amazon.com. Requiere una tarjeta de crédito, correo electrónico y número de teléfono.
Obtenga una nueva clave de acceso en https://console.aws.amazon.com/iam/home?nc2=h_m_sc#security_credential - -Ejecute lo siguiente en la consola: - -``` bash -$ sudo apt-get install s3cmd -$ mkdir tiny; cd tiny; -$ s3cmd sync s3://big-data-benchmark/pavlo/text-deflate/tiny/ . -$ cd .. -$ mkdir 1node; cd 1node; -$ s3cmd sync s3://big-data-benchmark/pavlo/text-deflate/1node/ . -$ cd .. -$ mkdir 5nodes; cd 5nodes; -$ s3cmd sync s3://big-data-benchmark/pavlo/text-deflate/5nodes/ . -$ cd .. -``` - -Ejecute las siguientes consultas de ClickHouse: - -``` sql -CREATE TABLE rankings_tiny -( - pageURL String, - pageRank UInt32, - avgDuration UInt32 -) ENGINE = Log; - -CREATE TABLE uservisits_tiny -( - sourceIP String, - destinationURL String, - visitDate Date, - adRevenue Float32, - UserAgent String, - cCode FixedString(3), - lCode FixedString(6), - searchWord String, - duration UInt32 -) ENGINE = MergeTree(visitDate, visitDate, 8192); - -CREATE TABLE rankings_1node -( - pageURL String, - pageRank UInt32, - avgDuration UInt32 -) ENGINE = Log; - -CREATE TABLE uservisits_1node -( - sourceIP String, - destinationURL String, - visitDate Date, - adRevenue Float32, - UserAgent String, - cCode FixedString(3), - lCode FixedString(6), - searchWord String, - duration UInt32 -) ENGINE = MergeTree(visitDate, visitDate, 8192); - -CREATE TABLE rankings_5nodes_on_single -( - pageURL String, - pageRank UInt32, - avgDuration UInt32 -) ENGINE = Log; - -CREATE TABLE uservisits_5nodes_on_single -( - sourceIP String, - destinationURL String, - visitDate Date, - adRevenue Float32, - UserAgent String, - cCode FixedString(3), - lCode FixedString(6), - searchWord String, - duration UInt32 -) ENGINE = MergeTree(visitDate, visitDate, 8192); -``` - -Volver a la consola: - -``` bash -$ for i in tiny/rankings/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO rankings_tiny FORMAT CSV"; done -$ for i in tiny/uservisits/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO uservisits_tiny FORMAT CSV"; done -$ for i in 1node/rankings/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO rankings_1node FORMAT CSV"; done -$ for i in 1node/uservisits/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO uservisits_1node FORMAT CSV"; done -$ for i in 5nodes/rankings/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO rankings_5nodes_on_single FORMAT CSV"; done -$ for i in 5nodes/uservisits/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO uservisits_5nodes_on_single FORMAT CSV"; done -``` - -Consultas para obtener muestras de datos: - -``` sql -SELECT pageURL, pageRank FROM rankings_1node WHERE pageRank > 1000 - -SELECT substring(sourceIP, 1, 8), sum(adRevenue) FROM uservisits_1node GROUP BY substring(sourceIP, 1, 8) - -SELECT - sourceIP, - sum(adRevenue) AS totalRevenue, - avg(pageRank) AS pageRank -FROM rankings_1node ALL INNER JOIN -( - SELECT - sourceIP, - destinationURL AS pageURL, - adRevenue - FROM uservisits_1node - WHERE (visitDate > '1980-01-01') AND (visitDate < '1980-04-01') -) USING pageURL -GROUP BY sourceIP -ORDER BY totalRevenue DESC -LIMIT 1 -``` - -[Artículo 
Original](https://clickhouse.tech/docs/en/getting_started/example_datasets/amplab_benchmark/) diff --git a/docs/es/getting-started/example-datasets/criteo.md b/docs/es/getting-started/example-datasets/criteo.md deleted file mode 100644 index 79203b0276d..00000000000 --- a/docs/es/getting-started/example-datasets/criteo.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 19 -toc_title: Registros de clics de Terabyte de Criteo ---- - -# Terabyte de registros de clics de Criteo {#terabyte-of-click-logs-from-criteo} - -Descargue los datos de http://labs.criteo.com/downloads/download-terabyte-click-logs/ - -Cree una tabla para importar el registro: - -``` sql -CREATE TABLE criteo_log (date Date, clicked UInt8, int1 Int32, int2 Int32, int3 Int32, int4 Int32, int5 Int32, int6 Int32, int7 Int32, int8 Int32, int9 Int32, int10 Int32, int11 Int32, int12 Int32, int13 Int32, cat1 String, cat2 String, cat3 String, cat4 String, cat5 String, cat6 String, cat7 String, cat8 String, cat9 String, cat10 String, cat11 String, cat12 String, cat13 String, cat14 String, cat15 String, cat16 String, cat17 String, cat18 String, cat19 String, cat20 String, cat21 String, cat22 String, cat23 String, cat24 String, cat25 String, cat26 String) ENGINE = Log -``` - -Descargar los datos: - -``` bash -$ for i in {00..23}; do echo $i; zcat datasets/criteo/day_${i#0}.gz | sed -r 's/^/2000-01-'${i/00/24}'\t/' | clickhouse-client --host=example-perftest01j --query="INSERT INTO criteo_log FORMAT TabSeparated"; done -``` - -Crear una tabla para los datos convertidos: - -``` sql -CREATE TABLE criteo -( - date Date, - clicked UInt8, - int1 Int32, - int2 Int32, - int3 Int32, - int4 Int32, - int5 Int32, - int6 Int32, - int7 Int32, - int8 Int32, - int9 Int32, - int10 Int32, - int11 Int32, - int12 Int32, - int13 Int32, - icat1 UInt32, - icat2 UInt32, - icat3 UInt32, - icat4 UInt32, - icat5 UInt32, - icat6 UInt32, - icat7 UInt32, - icat8 UInt32, - icat9 UInt32, - icat10 UInt32, - icat11 UInt32, - icat12 UInt32, - icat13 UInt32, - icat14 UInt32, - icat15 UInt32, - icat16 UInt32, - icat17 UInt32, - icat18 UInt32, - icat19 UInt32, - icat20 UInt32, - icat21 UInt32, - icat22 UInt32, - icat23 UInt32, - icat24 UInt32, - icat25 UInt32, - icat26 UInt32 -) ENGINE = MergeTree(date, intHash32(icat1), (date, intHash32(icat1)), 8192) -``` - -Transforme los datos del registro sin procesar y colóquelos en la segunda tabla: - -``` sql -INSERT INTO criteo SELECT date, clicked, int1, int2, int3, int4, int5, int6, int7, int8, int9, int10, int11, int12, int13, reinterpretAsUInt32(unhex(cat1)) AS icat1, reinterpretAsUInt32(unhex(cat2)) AS icat2, reinterpretAsUInt32(unhex(cat3)) AS icat3, reinterpretAsUInt32(unhex(cat4)) AS icat4, reinterpretAsUInt32(unhex(cat5)) AS icat5, reinterpretAsUInt32(unhex(cat6)) AS icat6, reinterpretAsUInt32(unhex(cat7)) AS icat7, reinterpretAsUInt32(unhex(cat8)) AS icat8, reinterpretAsUInt32(unhex(cat9)) AS icat9, reinterpretAsUInt32(unhex(cat10)) AS icat10, reinterpretAsUInt32(unhex(cat11)) AS icat11, reinterpretAsUInt32(unhex(cat12)) AS icat12, reinterpretAsUInt32(unhex(cat13)) AS icat13, reinterpretAsUInt32(unhex(cat14)) AS icat14, reinterpretAsUInt32(unhex(cat15)) AS icat15, reinterpretAsUInt32(unhex(cat16)) AS icat16, reinterpretAsUInt32(unhex(cat17)) AS icat17, reinterpretAsUInt32(unhex(cat18)) AS icat18, reinterpretAsUInt32(unhex(cat19)) AS icat19, reinterpretAsUInt32(unhex(cat20)) AS icat20, reinterpretAsUInt32(unhex(cat21)) AS 
icat21, reinterpretAsUInt32(unhex(cat22)) AS icat22, reinterpretAsUInt32(unhex(cat23)) AS icat23, reinterpretAsUInt32(unhex(cat24)) AS icat24, reinterpretAsUInt32(unhex(cat25)) AS icat25, reinterpretAsUInt32(unhex(cat26)) AS icat26 FROM criteo_log;
-
-DROP TABLE criteo_log;
-```
-
-[Artículo Original](https://clickhouse.tech/docs/en/getting_started/example_datasets/criteo/)
diff --git a/docs/es/getting-started/example-datasets/index.md b/docs/es/getting-started/example-datasets/index.md
deleted file mode 100644
index 28e06987af1..00000000000
--- a/docs/es/getting-started/example-datasets/index.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_folder_title: Datos de ejemplo
-toc_priority: 12
-toc_title: "Introducci\xF3n"
----
-
-# Datos de ejemplo {#example-datasets}
-
-En esta sección se describe cómo obtener conjuntos de datos de ejemplo e importarlos a ClickHouse.
-Para algunos conjuntos de datos también están disponibles consultas de ejemplo.
-
--   [Conjunto de datos anonimizado de Yandex.Metrica](metrica.md)
--   [Star Schema Benchmark](star-schema.md)
--   [WikiStat](wikistat.md)
--   [Terabyte de registros de clics de Criteo](criteo.md)
--   [Referencia de Big Data de AMPLab](amplab-benchmark.md)
--   [Datos de taxis de Nueva York](nyc-taxi.md)
--   [OnTime](ontime.md)
-
-[Artículo Original](https://clickhouse.tech/docs/en/getting_started/example_datasets)
diff --git a/docs/es/getting-started/example-datasets/metrica.md b/docs/es/getting-started/example-datasets/metrica.md
deleted file mode 100644
index 0b3bc8b6833..00000000000
--- a/docs/es/getting-started/example-datasets/metrica.md
+++ /dev/null
@@ -1,70 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 14
-toc_title: Datos de Yandex.Metrica
----
-
-# Datos anonimizados de Yandex.Metrica {#anonymized-yandex-metrica-data}
-
-El conjunto de datos consta de dos tablas que contienen datos anonimizados sobre los hits (`hits_v1`) y las visitas (`visits_v1`) de Yandex.Metrica. Puede leer más sobre Yandex.Metrica en el apartado [Historial de ClickHouse](../../introduction/history.md).
-
-El conjunto de datos consta de dos tablas; cualquiera de ellas se puede descargar como `tsv.xz` o como particiones preparadas. Además, una versión extendida de la tabla `hits`, que contiene 100 millones de filas, está disponible como TSV en https://datasets.clickhouse.tech/hits/tsv/hits_100m_obfuscated_v1.tsv.xz y como particiones preparadas en https://datasets.clickhouse.tech/hits/partitions/hits_100m_obfuscated_v1.tar.xz.
- -## Obtención de tablas a partir de particiones preparadas {#obtaining-tables-from-prepared-partitions} - -Descargar e importar tabla de hits: - -``` bash -curl -O https://datasets.clickhouse.tech/hits/partitions/hits_v1.tar -tar xvf hits_v1.tar -C /var/lib/clickhouse # path to ClickHouse data directory -# check permissions on unpacked data, fix if required -sudo service clickhouse-server restart -clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1" -``` - -Descargar e importar visitas: - -``` bash -curl -O https://datasets.clickhouse.tech/visits/partitions/visits_v1.tar -tar xvf visits_v1.tar -C /var/lib/clickhouse # path to ClickHouse data directory -# check permissions on unpacked data, fix if required -sudo service clickhouse-server restart -clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1" -``` - -## Obtención de tablas a partir de un archivo TSV comprimido {#obtaining-tables-from-compressed-tsv-file} - -Descargar e importar hits desde un archivo TSV comprimido: - -``` bash -curl https://datasets.clickhouse.tech/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv -# now create table -clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets" -clickhouse-client --query "CREATE TABLE datasets.hits_v1 ( WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, 
OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192" -# import data -cat hits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.hits_v1 FORMAT TSV" --max_insert_block_size=100000 -# optionally you can optimize table -clickhouse-client --query "OPTIMIZE TABLE datasets.hits_v1 FINAL" -clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1" -``` - -Descargue e importe visitas desde un archivo tsv comprimido: - -``` bash -curl https://datasets.clickhouse.tech/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv -# now create table -clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets" -clickhouse-client --query "CREATE TABLE datasets.visits_v1 ( CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8, VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32, Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String, EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32, SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32, SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16, UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16, FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8, Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), Params Array(String), Goals Nested(ID UInt32, Serial UInt32, EventTime DateTime, Price Int64, OrderID String, CurrencyID UInt32), WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64, ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32, ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32, ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32, ClickMarketCategoryName String, ClickMarketPPName String, 
ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16, ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime, PredLastVisit Date, LastVisit Date, TotalVisits UInt32, TraficSource Nested(ID Int8, SearchEngineID UInt16, AdvEngineID UInt8, PlaceID UInt16, SocialSourceNetworkID UInt8, Domain String, SearchPhrase String, SocialSourcePage String), Attendance FixedString(16), CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64, StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64, OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64, UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), Market Nested(Type UInt8, GoalID UInt32, OrderID String, OrderPrice Int64, PP UInt32, DirectPlaceID UInt32, DirectOrderID UInt32, DirectBannerID UInt32, GoodID String, GoodName String, GoodQuantity Int32, GoodPrice Int64), IslandID FixedString(16)) ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192"
-# import data
-cat visits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.visits_v1 FORMAT TSV" --max_insert_block_size=100000
-# optionally you can optimize table
-clickhouse-client --query "OPTIMIZE TABLE datasets.visits_v1 FINAL"
-clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1"
-```
-
-## Example Queries {#example-queries}
-
-[ClickHouse tutorial](../../getting-started/tutorial.md) is based on the Yandex.Metrica dataset, and the recommended way to get started with this dataset is to simply go through the tutorial.
-
-Additional examples of queries to these tables can be found among the [stateful tests](https://github.com/ClickHouse/ClickHouse/tree/master/tests/queries/1_stateful) of ClickHouse (they are named `test.hits` and `test.visits` there).
diff --git a/docs/es/getting-started/example-datasets/nyc-taxi.md b/docs/es/getting-started/example-datasets/nyc-taxi.md
deleted file mode 100644
index c6441311c96..00000000000
--- a/docs/es/getting-started/example-datasets/nyc-taxi.md
+++ /dev/null
@@ -1,390 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 16
-toc_title: New York Taxi Data
----
-
-# New York Taxi Data {#new-york-taxi-data}
-
-This dataset can be obtained in two ways:
-
-- import from raw data
-- download of prepared partitions
-
-## How to Import the Raw Data {#how-to-import-the-raw-data}
-
-See https://github.com/toddwschneider/nyc-taxi-data and http://tech.marksblogg.com/billion-nyc-taxi-rides-redshift.html for the description of the dataset and instructions for downloading.
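-
-A possible starting point, sketched here only as an illustration; the linked repository's README is the authoritative set of download steps:
-
-``` bash
-# fetch the helper repository referenced above; it documents and scripts the raw-data download
-git clone https://github.com/toddwschneider/nyc-taxi-data.git
-cd nyc-taxi-data
-```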
-
-The download will result in about 227 GB of uncompressed data in CSV files. The download takes about an hour over a 1 Gbit connection (parallel downloading from s3.amazonaws.com recovers at least half of a 1 Gbit channel).
-Some of the files might not download fully. Check the file sizes and re-download any that seem doubtful.
-
-Some of the files might contain invalid rows. You can fix them as follows:
-
-``` bash
-sed -E '/(.*,){18,}/d' data/yellow_tripdata_2010-02.csv > data/yellow_tripdata_2010-02.csv_
-sed -E '/(.*,){18,}/d' data/yellow_tripdata_2010-03.csv > data/yellow_tripdata_2010-03.csv_
-mv data/yellow_tripdata_2010-02.csv_ data/yellow_tripdata_2010-02.csv
-mv data/yellow_tripdata_2010-03.csv_ data/yellow_tripdata_2010-03.csv
-```
-
-Then the data must be pre-processed in PostgreSQL. This will create selections of points in the polygons (to match points on the map with the boroughs of New York City) and combine all the data into a single denormalized flat table by using a JOIN. To do this, you will need to install PostgreSQL with PostGIS support.
-
-Be careful when running `initialize_database.sh` and manually re-check that all the tables were created correctly.
-
-It takes about 20-30 minutes to process each month's worth of data in PostgreSQL, for a total of about 48 hours.
-
-You can check the number of downloaded rows as follows:
-
-``` bash
-$ time psql nyc-taxi-data -c "SELECT count(*) FROM trips;"
-## Count
- 1298979494
-(1 row)
-
-real 7m9.164s
-```
-
-(This is slightly more than 1.1 billion rows reported by Mark Litwintschik in a series of blog posts.)
-
-The data in PostgreSQL uses 370 GB of space.
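-
-Before exporting, it can be worth cross-checking the figures above; a minimal sketch, assuming the `nyc-taxi-data` database name used throughout:
-
-``` bash
-# list the tables created by initialize_database.sh, then report the on-disk database size
-psql nyc-taxi-data -c '\dt'
-psql nyc-taxi-data -c "SELECT pg_size_pretty(pg_database_size('nyc-taxi-data'))"
-```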
-
-Exporting the data from PostgreSQL:
-
-``` sql
-COPY
-(
-    SELECT trips.id,
-           trips.vendor_id,
-           trips.pickup_datetime,
-           trips.dropoff_datetime,
-           trips.store_and_fwd_flag,
-           trips.rate_code_id,
-           trips.pickup_longitude,
-           trips.pickup_latitude,
-           trips.dropoff_longitude,
-           trips.dropoff_latitude,
-           trips.passenger_count,
-           trips.trip_distance,
-           trips.fare_amount,
-           trips.extra,
-           trips.mta_tax,
-           trips.tip_amount,
-           trips.tolls_amount,
-           trips.ehail_fee,
-           trips.improvement_surcharge,
-           trips.total_amount,
-           trips.payment_type,
-           trips.trip_type,
-           trips.pickup,
-           trips.dropoff,
-
-           cab_types.type cab_type,
-
-           weather.precipitation_tenths_of_mm rain,
-           weather.snow_depth_mm,
-           weather.snowfall_mm,
-           weather.max_temperature_tenths_degrees_celsius max_temp,
-           weather.min_temperature_tenths_degrees_celsius min_temp,
-           weather.average_wind_speed_tenths_of_meters_per_second wind,
-
-           pick_up.gid pickup_nyct2010_gid,
-           pick_up.ctlabel pickup_ctlabel,
-           pick_up.borocode pickup_borocode,
-           pick_up.boroname pickup_boroname,
-           pick_up.ct2010 pickup_ct2010,
-           pick_up.boroct2010 pickup_boroct2010,
-           pick_up.cdeligibil pickup_cdeligibil,
-           pick_up.ntacode pickup_ntacode,
-           pick_up.ntaname pickup_ntaname,
-           pick_up.puma pickup_puma,
-
-           drop_off.gid dropoff_nyct2010_gid,
-           drop_off.ctlabel dropoff_ctlabel,
-           drop_off.borocode dropoff_borocode,
-           drop_off.boroname dropoff_boroname,
-           drop_off.ct2010 dropoff_ct2010,
-           drop_off.boroct2010 dropoff_boroct2010,
-           drop_off.cdeligibil dropoff_cdeligibil,
-           drop_off.ntacode dropoff_ntacode,
-           drop_off.ntaname dropoff_ntaname,
-           drop_off.puma dropoff_puma
-    FROM trips
-    LEFT JOIN cab_types
-           ON trips.cab_type_id = cab_types.id
-    LEFT JOIN central_park_weather_observations_raw weather
-           ON weather.date = trips.pickup_datetime::date
-    LEFT JOIN nyct2010 pick_up
-           ON pick_up.gid = trips.pickup_nyct2010_gid
-    LEFT JOIN nyct2010 drop_off
-           ON drop_off.gid = trips.dropoff_nyct2010_gid
-) TO '/opt/milovidov/nyc-taxi-data/trips.tsv';
-```
-
-The data snapshot is created at a speed of about 50 MB per second. While creating the snapshot, PostgreSQL reads from the disk at a speed of about 28 MB per second.
-This takes about 5 hours. The resulting TSV file is 590612904969 bytes.
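-
-A quick sanity check of the export before loading it anywhere; a sketch, assuming the output path used in the `COPY` statement above:
-
-``` bash
-# the byte count should match the figure quoted above; the line count should match the trips row count
-ls -l /opt/milovidov/nyc-taxi-data/trips.tsv
-wc -l /opt/milovidov/nyc-taxi-data/trips.tsv
-```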
-
-Create a temporary table in ClickHouse:
-
-``` sql
-CREATE TABLE trips
-(
-trip_id UInt32,
-vendor_id String,
-pickup_datetime DateTime,
-dropoff_datetime Nullable(DateTime),
-store_and_fwd_flag Nullable(FixedString(1)),
-rate_code_id Nullable(UInt8),
-pickup_longitude Nullable(Float64),
-pickup_latitude Nullable(Float64),
-dropoff_longitude Nullable(Float64),
-dropoff_latitude Nullable(Float64),
-passenger_count Nullable(UInt8),
-trip_distance Nullable(Float64),
-fare_amount Nullable(Float32),
-extra Nullable(Float32),
-mta_tax Nullable(Float32),
-tip_amount Nullable(Float32),
-tolls_amount Nullable(Float32),
-ehail_fee Nullable(Float32),
-improvement_surcharge Nullable(Float32),
-total_amount Nullable(Float32),
-payment_type Nullable(String),
-trip_type Nullable(UInt8),
-pickup Nullable(String),
-dropoff Nullable(String),
-cab_type Nullable(String),
-precipitation Nullable(UInt8),
-snow_depth Nullable(UInt8),
-snowfall Nullable(UInt8),
-max_temperature Nullable(UInt8),
-min_temperature Nullable(UInt8),
-average_wind_speed Nullable(UInt8),
-pickup_nyct2010_gid Nullable(UInt8),
-pickup_ctlabel Nullable(String),
-pickup_borocode Nullable(UInt8),
-pickup_boroname Nullable(String),
-pickup_ct2010 Nullable(String),
-pickup_boroct2010 Nullable(String),
-pickup_cdeligibil Nullable(FixedString(1)),
-pickup_ntacode Nullable(String),
-pickup_ntaname Nullable(String),
-pickup_puma Nullable(String),
-dropoff_nyct2010_gid Nullable(UInt8),
-dropoff_ctlabel Nullable(String),
-dropoff_borocode Nullable(UInt8),
-dropoff_boroname Nullable(String),
-dropoff_ct2010 Nullable(String),
-dropoff_boroct2010 Nullable(String),
-dropoff_cdeligibil Nullable(String),
-dropoff_ntacode Nullable(String),
-dropoff_ntaname Nullable(String),
-dropoff_puma Nullable(String)
-) ENGINE = Log;
-```
-
-It is needed for converting fields to more correct data types and, if possible, to eliminate NULLs.
-
-``` bash
-$ time clickhouse-client --query="INSERT INTO trips FORMAT TabSeparated" < trips.tsv
-
-real 75m56.214s
-```
-
-Data is read at a speed of 112-140 MB/second.
-Loading data into a Log type table in one stream took 76 minutes.
-The data in this table uses 142 GB.
-
-(Importing data directly from Postgres is also possible using `COPY ... TO PROGRAM`.)
-
-Unfortunately, all the fields associated with the weather (precipitation…average_wind_speed) were filled with NULL. Because of this, we will remove them from the final dataset.
-
-To start with, we will create a table on a single server. Later on, we will make the table distributed.
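-
-As a sketch of the `COPY ... TO PROGRAM` route mentioned above (the simplified `SELECT` is a placeholder; the full export `SELECT` shown earlier would take its place, and `TO PROGRAM` requires PostgreSQL superuser rights):
-
-``` bash
-# stream rows straight from PostgreSQL into ClickHouse, skipping the intermediate TSV file
-psql nyc-taxi-data <<'SQL'
-COPY (SELECT * FROM trips LIMIT 1000)
-TO PROGRAM 'clickhouse-client --query="INSERT INTO trips FORMAT TabSeparated"';
-SQL
-```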
- -Crear y rellenar una tabla de resumen: - -``` sql -CREATE TABLE trips_mergetree -ENGINE = MergeTree(pickup_date, pickup_datetime, 8192) -AS SELECT - -trip_id, -CAST(vendor_id AS Enum8('1' = 1, '2' = 2, 'CMT' = 3, 'VTS' = 4, 'DDS' = 5, 'B02512' = 10, 'B02598' = 11, 'B02617' = 12, 'B02682' = 13, 'B02764' = 14)) AS vendor_id, -toDate(pickup_datetime) AS pickup_date, -ifNull(pickup_datetime, toDateTime(0)) AS pickup_datetime, -toDate(dropoff_datetime) AS dropoff_date, -ifNull(dropoff_datetime, toDateTime(0)) AS dropoff_datetime, -assumeNotNull(store_and_fwd_flag) IN ('Y', '1', '2') AS store_and_fwd_flag, -assumeNotNull(rate_code_id) AS rate_code_id, -assumeNotNull(pickup_longitude) AS pickup_longitude, -assumeNotNull(pickup_latitude) AS pickup_latitude, -assumeNotNull(dropoff_longitude) AS dropoff_longitude, -assumeNotNull(dropoff_latitude) AS dropoff_latitude, -assumeNotNull(passenger_count) AS passenger_count, -assumeNotNull(trip_distance) AS trip_distance, -assumeNotNull(fare_amount) AS fare_amount, -assumeNotNull(extra) AS extra, -assumeNotNull(mta_tax) AS mta_tax, -assumeNotNull(tip_amount) AS tip_amount, -assumeNotNull(tolls_amount) AS tolls_amount, -assumeNotNull(ehail_fee) AS ehail_fee, -assumeNotNull(improvement_surcharge) AS improvement_surcharge, -assumeNotNull(total_amount) AS total_amount, -CAST((assumeNotNull(payment_type) AS pt) IN ('CSH', 'CASH', 'Cash', 'CAS', 'Cas', '1') ? 'CSH' : (pt IN ('CRD', 'Credit', 'Cre', 'CRE', 'CREDIT', '2') ? 'CRE' : (pt IN ('NOC', 'No Charge', 'No', '3') ? 'NOC' : (pt IN ('DIS', 'Dispute', 'Dis', '4') ? 'DIS' : 'UNK'))) AS Enum8('CSH' = 1, 'CRE' = 2, 'UNK' = 0, 'NOC' = 3, 'DIS' = 4)) AS payment_type_, -assumeNotNull(trip_type) AS trip_type, -ifNull(toFixedString(unhex(pickup), 25), toFixedString('', 25)) AS pickup, -ifNull(toFixedString(unhex(dropoff), 25), toFixedString('', 25)) AS dropoff, -CAST(assumeNotNull(cab_type) AS Enum8('yellow' = 1, 'green' = 2, 'uber' = 3)) AS cab_type, - -assumeNotNull(pickup_nyct2010_gid) AS pickup_nyct2010_gid, -toFloat32(ifNull(pickup_ctlabel, '0')) AS pickup_ctlabel, -assumeNotNull(pickup_borocode) AS pickup_borocode, -CAST(assumeNotNull(pickup_boroname) AS Enum8('Manhattan' = 1, 'Queens' = 4, 'Brooklyn' = 3, '' = 0, 'Bronx' = 2, 'Staten Island' = 5)) AS pickup_boroname, -toFixedString(ifNull(pickup_ct2010, '000000'), 6) AS pickup_ct2010, -toFixedString(ifNull(pickup_boroct2010, '0000000'), 7) AS pickup_boroct2010, -CAST(assumeNotNull(ifNull(pickup_cdeligibil, ' ')) AS Enum8(' ' = 0, 'E' = 1, 'I' = 2)) AS pickup_cdeligibil, -toFixedString(ifNull(pickup_ntacode, '0000'), 4) AS pickup_ntacode, - -CAST(assumeNotNull(pickup_ntaname) AS Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond Valley-Tottenville' = 32, 
'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York (Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East Village' = 56, 'East Williamsburg' = 57, 'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft. Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield Gardens South-Brookville' = 
157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195)) AS pickup_ntaname, - -toUInt16(ifNull(pickup_puma, '0')) AS pickup_puma, - -assumeNotNull(dropoff_nyct2010_gid) AS dropoff_nyct2010_gid, -toFloat32(ifNull(dropoff_ctlabel, '0')) AS dropoff_ctlabel, -assumeNotNull(dropoff_borocode) AS dropoff_borocode, -CAST(assumeNotNull(dropoff_boroname) AS Enum8('Manhattan' = 1, 'Queens' = 4, 'Brooklyn' = 3, '' = 0, 'Bronx' = 2, 'Staten Island' = 5)) AS dropoff_boroname, -toFixedString(ifNull(dropoff_ct2010, '000000'), 6) AS dropoff_ct2010, -toFixedString(ifNull(dropoff_boroct2010, '0000000'), 7) AS dropoff_boroct2010, -CAST(assumeNotNull(ifNull(dropoff_cdeligibil, ' ')) AS Enum8(' ' = 0, 'E' = 1, 'I' = 2)) AS dropoff_cdeligibil, -toFixedString(ifNull(dropoff_ntacode, '0000'), 4) AS dropoff_ntacode, - -CAST(assumeNotNull(dropoff_ntaname) AS Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond Valley-Tottenville' = 32, 'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York (Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East Village' = 56, 'East Williamsburg' = 57, 
'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft. Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield Gardens South-Brookville' = 157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. 
Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195)) AS dropoff_ntaname, - -toUInt16(ifNull(dropoff_puma, '0')) AS dropoff_puma - -FROM trips -``` - -Esto toma 3030 segundos a una velocidad de aproximadamente 428,000 filas por segundo. -Para cargarlo más rápido, puede crear la tabla con el `Log` motor en lugar de `MergeTree`. En este caso, la descarga funciona más rápido que 200 segundos. - -La tabla utiliza 126 GB de espacio en disco. - -``` sql -SELECT formatReadableSize(sum(bytes)) FROM system.parts WHERE table = 'trips_mergetree' AND active -``` - -``` text -┌─formatReadableSize(sum(bytes))─┐ -│ 126.18 GiB │ -└────────────────────────────────┘ -``` - -Entre otras cosas, puede ejecutar la consulta OPTIMIZE en MergeTree. Pero no es necesario ya que todo estará bien sin él. - -## Descarga de Prepared Partitions {#download-of-prepared-partitions} - -``` bash -$ curl -O https://datasets.clickhouse.tech/trips_mergetree/partitions/trips_mergetree.tar -$ tar xvf trips_mergetree.tar -C /var/lib/clickhouse # path to ClickHouse data directory -$ # check permissions of unpacked data, fix if required -$ sudo service clickhouse-server restart -$ clickhouse-client --query "select count(*) from datasets.trips_mergetree" -``` - -!!! info "INFO" - Si va a ejecutar las consultas que se describen a continuación, debe usar el nombre completo de la tabla, `datasets.trips_mergetree`. - -## Resultados en un solo servidor {#results-on-single-server} - -Q1: - -``` sql -SELECT cab_type, count(*) FROM trips_mergetree GROUP BY cab_type -``` - -0.490 segundos. - -Q2: - -``` sql -SELECT passenger_count, avg(total_amount) FROM trips_mergetree GROUP BY passenger_count -``` - -1.224 segundos. - -Q3: - -``` sql -SELECT passenger_count, toYear(pickup_date) AS year, count(*) FROM trips_mergetree GROUP BY passenger_count, year -``` - -2.104 segundos. - -Q4: - -``` sql -SELECT passenger_count, toYear(pickup_date) AS year, round(trip_distance) AS distance, count(*) -FROM trips_mergetree -GROUP BY passenger_count, year, distance -ORDER BY year, count(*) DESC -``` - -3.593 segundos. - -Se utilizó el siguiente servidor: - -Dos CPU Intel (R) Xeon (R) E5-2650 v2 @ 2.60GHz, 16 núcleos físicos en total, 128 GiB RAM, 8x6 TB HD en hardware RAID-5 - -El tiempo de ejecución es el mejor de tres carreras. Pero a partir de la segunda ejecución, las consultas leen datos de la memoria caché del sistema de archivos. 
No se produce más almacenamiento en caché: los datos se leen y procesan en cada ejecución. - -Creación de una tabla en tres servidores: - -En cada servidor: - -``` sql -CREATE TABLE default.trips_mergetree_third ( trip_id UInt32, vendor_id Enum8('1' = 1, '2' = 2, 'CMT' = 3, 'VTS' = 4, 'DDS' = 5, 'B02512' = 10, 'B02598' = 11, 'B02617' = 12, 'B02682' = 13, 'B02764' = 14), pickup_date Date, pickup_datetime DateTime, dropoff_date Date, dropoff_datetime DateTime, store_and_fwd_flag UInt8, rate_code_id UInt8, pickup_longitude Float64, pickup_latitude Float64, dropoff_longitude Float64, dropoff_latitude Float64, passenger_count UInt8, trip_distance Float64, fare_amount Float32, extra Float32, mta_tax Float32, tip_amount Float32, tolls_amount Float32, ehail_fee Float32, improvement_surcharge Float32, total_amount Float32, payment_type_ Enum8('UNK' = 0, 'CSH' = 1, 'CRE' = 2, 'NOC' = 3, 'DIS' = 4), trip_type UInt8, pickup FixedString(25), dropoff FixedString(25), cab_type Enum8('yellow' = 1, 'green' = 2, 'uber' = 3), pickup_nyct2010_gid UInt8, pickup_ctlabel Float32, pickup_borocode UInt8, pickup_boroname Enum8('' = 0, 'Manhattan' = 1, 'Bronx' = 2, 'Brooklyn' = 3, 'Queens' = 4, 'Staten Island' = 5), pickup_ct2010 FixedString(6), pickup_boroct2010 FixedString(7), pickup_cdeligibil Enum8(' ' = 0, 'E' = 1, 'I' = 2), pickup_ntacode FixedString(4), pickup_ntaname Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond Valley-Tottenville' = 32, 'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York (Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East Village' = 56, 'East Williamsburg' = 57, 'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft. 
Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield Gardens South-Brookville' = 157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. 
George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195), pickup_puma UInt16, dropoff_nyct2010_gid UInt8, dropoff_ctlabel Float32, dropoff_borocode UInt8, dropoff_boroname Enum8('' = 0, 'Manhattan' = 1, 'Bronx' = 2, 'Brooklyn' = 3, 'Queens' = 4, 'Staten Island' = 5), dropoff_ct2010 FixedString(6), dropoff_boroct2010 FixedString(7), dropoff_cdeligibil Enum8(' ' = 0, 'E' = 1, 'I' = 2), dropoff_ntacode FixedString(4), dropoff_ntaname Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond Valley-Tottenville' = 32, 'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York (Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East Village' = 56, 'East Williamsburg' = 57, 'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft. 
Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield Gardens South-Brookville' = 157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. 
George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195), dropoff_puma UInt16) ENGINE = MergeTree(pickup_date, pickup_datetime, 8192)
-```
-
-On the source server:
-
-``` sql
-CREATE TABLE trips_mergetree_x3 AS trips_mergetree_third ENGINE = Distributed(perftest, default, trips_mergetree_third, rand())
-```
-
-The following query redistributes data:
-
-``` sql
-INSERT INTO trips_mergetree_x3 SELECT * FROM trips_mergetree
-```
-
-This takes 2454 seconds.
-
-On three servers:
-
-Q1: 0.212 seconds.
-Q2: 0.438 seconds.
-Q3: 0.733 seconds.
-Q4: 1.241 seconds.
-
-No surprises here, since the queries scale linearly.
-
-We also have results from a cluster of 140 servers:
-
-Q1: 0.028 sec.
-Q2: 0.043 sec.
-Q3: 0.051 sec.
-Q4: 0.072 sec.
-
-In this case, the query processing time is determined above all by network latency.
-We ran queries using a client located in a Yandex datacenter in Finland on a cluster in Russia, which added about 20 ms of latency.
-
-## Summary {#summary}
-
-| servers | Q1    | Q2    | Q3    | Q4    |
-|---------|-------|-------|-------|-------|
-| 1       | 0.490 | 1.224 | 2.104 | 3.593 |
-| 3       | 0.212 | 0.438 | 0.733 | 1.241 |
-| 140     | 0.028 | 0.043 | 0.051 | 0.072 |
-
-[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/nyc_taxi/)
diff --git a/docs/es/getting-started/example-datasets/ontime.md b/docs/es/getting-started/example-datasets/ontime.md
deleted file mode 100644
index f89d74048bd..00000000000
--- a/docs/es/getting-started/example-datasets/ontime.md
+++ /dev/null
@@ -1,412 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 15
-toc_title: OnTime
----
-
-# OnTime {#ontime}
-
-This dataset can be obtained in two ways:
-
-- import from raw data
-- download of prepared partitions
-
-## Import from Raw Data {#import-from-raw-data}
-
-Downloading data:
-
-``` bash
-for s in `seq 1987 2018`
-do
-for m in `seq 1 12`
-do
-wget https://transtats.bts.gov/PREZIP/On_Time_Reporting_Carrier_On_Time_Performance_1987_present_${s}_${m}.zip
-done
-done
-```
-
-(from https://github.com/Percona-Lab/ontime-airline-performance/blob/master/download.sh )
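-
-Downloads like this can fail part-way without an obvious error. A quick sanity check before loading, assuming the archives were saved to the current directory (32 years of 12 months each, so 384 files are expected):
-
-``` bash
-# count the downloaded archives and flag any that look suspiciously small
-ls On_Time_Reporting_Carrier_On_Time_Performance_1987_present_*.zip | wc -l
-find . -name '*.zip' -size -10k
-```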
-
-Creating a table:
-
-``` sql
-CREATE TABLE `ontime` (
-    `Year` UInt16,
-    `Quarter` UInt8,
-    `Month` UInt8,
-    `DayofMonth` UInt8,
-    `DayOfWeek` UInt8,
-    `FlightDate` Date,
-    `UniqueCarrier` FixedString(7),
-    `AirlineID` Int32,
-    `Carrier` FixedString(2),
-    `TailNum` String,
-    `FlightNum` String,
-    `OriginAirportID` Int32,
-    `OriginAirportSeqID` Int32,
-    `OriginCityMarketID` Int32,
-    `Origin` FixedString(5),
-    `OriginCityName` String,
-    `OriginState` FixedString(2),
-    `OriginStateFips` String,
-    `OriginStateName` String,
-    `OriginWac` Int32,
-    `DestAirportID` Int32,
-    `DestAirportSeqID` Int32,
-    `DestCityMarketID` Int32,
-    `Dest` FixedString(5),
-    `DestCityName` String,
-    `DestState` FixedString(2),
-    `DestStateFips` String,
-    `DestStateName` String,
-    `DestWac` Int32,
-    `CRSDepTime` Int32,
-    `DepTime` Int32,
-    `DepDelay` Int32,
-    `DepDelayMinutes` Int32,
-    `DepDel15` Int32,
-    `DepartureDelayGroups` String,
-    `DepTimeBlk` String,
-    `TaxiOut` Int32,
-    `WheelsOff` Int32,
-    `WheelsOn` Int32,
-    `TaxiIn` Int32,
-    `CRSArrTime` Int32,
-    `ArrTime` Int32,
-    `ArrDelay` Int32,
-    `ArrDelayMinutes` Int32,
-    `ArrDel15` Int32,
-    `ArrivalDelayGroups` Int32,
-    `ArrTimeBlk` String,
-    `Cancelled` UInt8,
-    `CancellationCode` FixedString(1),
-    `Diverted` UInt8,
-    `CRSElapsedTime` Int32,
-    `ActualElapsedTime` Int32,
-    `AirTime` Int32,
-    `Flights` Int32,
-    `Distance` Int32,
-    `DistanceGroup` UInt8,
-    `CarrierDelay` Int32,
-    `WeatherDelay` Int32,
-    `NASDelay` Int32,
-    `SecurityDelay` Int32,
-    `LateAircraftDelay` Int32,
-    `FirstDepTime` String,
-    `TotalAddGTime` String,
-    `LongestAddGTime` String,
-    `DivAirportLandings` String,
-    `DivReachedDest` String,
-    `DivActualElapsedTime` String,
-    `DivArrDelay` String,
-    `DivDistance` String,
-    `Div1Airport` String,
-    `Div1AirportID` Int32,
-    `Div1AirportSeqID` Int32,
-    `Div1WheelsOn` String,
-    `Div1TotalGTime` String,
-    `Div1LongestGTime` String,
-    `Div1WheelsOff` String,
-    `Div1TailNum` String,
-    `Div2Airport` String,
-    `Div2AirportID` Int32,
-    `Div2AirportSeqID` Int32,
-    `Div2WheelsOn` String,
-    `Div2TotalGTime` String,
-    `Div2LongestGTime` String,
-    `Div2WheelsOff` String,
-    `Div2TailNum` String,
-    `Div3Airport` String,
-    `Div3AirportID` Int32,
-    `Div3AirportSeqID` Int32,
-    `Div3WheelsOn` String,
-    `Div3TotalGTime` String,
-    `Div3LongestGTime` String,
-    `Div3WheelsOff` String,
-    `Div3TailNum` String,
-    `Div4Airport` String,
-    `Div4AirportID` Int32,
-    `Div4AirportSeqID` Int32,
-    `Div4WheelsOn` String,
-    `Div4TotalGTime` String,
-    `Div4LongestGTime` String,
-    `Div4WheelsOff` String,
-    `Div4TailNum` String,
-    `Div5Airport` String,
-    `Div5AirportID` Int32,
-    `Div5AirportSeqID` Int32,
-    `Div5WheelsOn` String,
-    `Div5TotalGTime` String,
-    `Div5LongestGTime` String,
-    `Div5WheelsOff` String,
-    `Div5TailNum` String
-) ENGINE = MergeTree
-PARTITION BY Year
-ORDER BY (Carrier, FlightDate)
-SETTINGS index_granularity = 8192;
-```
-
-Loading data:
-
-``` bash
-$ for i in *.zip; do echo $i; unzip -cq $i '*.csv' | sed 's/\.00//g' | clickhouse-client --host=example-perftest01j --query="INSERT INTO ontime FORMAT CSVWithNames"; done
-```
-
-## Download of Prepared Partitions {#download-of-prepared-partitions}
-
-``` bash
-$ curl -O https://datasets.clickhouse.tech/ontime/partitions/ontime.tar
-$ tar xvf ontime.tar -C /var/lib/clickhouse # path to ClickHouse data directory
-$ # check permissions of unpacked data, fix if required
-$ sudo service clickhouse-server restart
-$ clickhouse-client --query "select count(*) from datasets.ontime"
-```
-
-!!! info "INFO"
-    If you will run the queries described below, you have to use the full table name, `datasets.ontime`.
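-
-If you took the raw-data route instead, the same sanity check applies to the freshly loaded table:
-
-``` bash
-# confirm the import completed before benchmarking
-clickhouse-client --query "SELECT count(*) FROM ontime"
-```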
-
-## Queries {#queries}
-
-Q0.
-
-``` sql
-SELECT avg(c1)
-FROM
-(
-    SELECT Year, Month, count(*) AS c1
-    FROM ontime
-    GROUP BY Year, Month
-);
-```
-
-Q1. The number of flights per day from the year 2000 to 2008
-
-``` sql
-SELECT DayOfWeek, count(*) AS c
-FROM ontime
-WHERE Year>=2000 AND Year<=2008
-GROUP BY DayOfWeek
-ORDER BY c DESC;
-```
-
-Q2. The number of flights delayed by more than 10 minutes, grouped by the day of the week, for 2000-2008
-
-``` sql
-SELECT DayOfWeek, count(*) AS c
-FROM ontime
-WHERE DepDelay>10 AND Year>=2000 AND Year<=2008
-GROUP BY DayOfWeek
-ORDER BY c DESC;
-```
-
-Q3. The number of delays by airport for 2000-2008
-
-``` sql
-SELECT Origin, count(*) AS c
-FROM ontime
-WHERE DepDelay>10 AND Year>=2000 AND Year<=2008
-GROUP BY Origin
-ORDER BY c DESC
-LIMIT 10;
-```
-
-Q4. The number of delays by carrier for 2007
-
-``` sql
-SELECT Carrier, count(*)
-FROM ontime
-WHERE DepDelay>10 AND Year=2007
-GROUP BY Carrier
-ORDER BY count(*) DESC;
-```
-
-Q5. The percentage of delays by carrier for 2007
-
-``` sql
-SELECT Carrier, c, c2, c*100/c2 as c3
-FROM
-(
-    SELECT
-        Carrier,
-        count(*) AS c
-    FROM ontime
-    WHERE DepDelay>10
-        AND Year=2007
-    GROUP BY Carrier
-)
-JOIN
-(
-    SELECT
-        Carrier,
-        count(*) AS c2
-    FROM ontime
-    WHERE Year=2007
-    GROUP BY Carrier
-) USING Carrier
-ORDER BY c3 DESC;
-```
-
-Better version of the same query:
-
-``` sql
-SELECT Carrier, avg(DepDelay>10)*100 AS c3
-FROM ontime
-WHERE Year=2007
-GROUP BY Carrier
-ORDER BY c3 DESC
-```
-
-Q6. The previous request for a broader range of years, 2000-2008
-
-``` sql
-SELECT Carrier, c, c2, c*100/c2 as c3
-FROM
-(
-    SELECT
-        Carrier,
-        count(*) AS c
-    FROM ontime
-    WHERE DepDelay>10
-        AND Year>=2000 AND Year<=2008
-    GROUP BY Carrier
-)
-JOIN
-(
-    SELECT
-        Carrier,
-        count(*) AS c2
-    FROM ontime
-    WHERE Year>=2000 AND Year<=2008
-    GROUP BY Carrier
-) USING Carrier
-ORDER BY c3 DESC;
-```
-
-Better version of the same query:
-
-``` sql
-SELECT Carrier, avg(DepDelay>10)*100 AS c3
-FROM ontime
-WHERE Year>=2000 AND Year<=2008
-GROUP BY Carrier
-ORDER BY c3 DESC;
-```
-
-Q7. Percentage of flights delayed for more than 10 minutes, by year
-
-``` sql
-SELECT Year, c1/c2
-FROM
-(
-    select
-        Year,
-        count(*)*100 as c1
-    from ontime
-    WHERE DepDelay>10
-    GROUP BY Year
-)
-JOIN
-(
-    select
-        Year,
-        count(*) as c2
-    from ontime
-    GROUP BY Year
-) USING (Year)
-ORDER BY Year;
-```
-
-Better version of the same query:
-
-``` sql
-SELECT Year, avg(DepDelay>10)*100
-FROM ontime
-GROUP BY Year
-ORDER BY Year;
-```
-
-Q8. The most popular destinations by the number of directly connected cities for various year ranges
-
-``` sql
-SELECT DestCityName, uniqExact(OriginCityName) AS u
-FROM ontime
-WHERE Year >= 2000 and Year <= 2010
-GROUP BY DestCityName
-ORDER BY u DESC LIMIT 10;
-```
-
-Q9.
-
-``` sql
-SELECT Year, count(*) AS c1
-FROM ontime
-GROUP BY Year;
-```
-
-Q10.
-
-``` sql
-SELECT
-   min(Year), max(Year), Carrier, count(*) AS cnt,
-   sum(ArrDelayMinutes>30) AS flights_delayed,
-   round(sum(ArrDelayMinutes>30)/count(*),2) AS rate
-FROM ontime
-WHERE
-   DayOfWeek NOT IN (6,7) AND OriginState NOT IN ('AK', 'HI', 'PR', 'VI')
-   AND DestState NOT IN ('AK', 'HI', 'PR', 'VI')
-   AND FlightDate < '2010-01-01'
-GROUP by Carrier
-HAVING cnt>100000 and max(Year)>1990
-ORDER by rate DESC
-LIMIT 1000;
-```
-
-Bonus:
-
-``` sql
-SELECT avg(cnt)
-FROM
-(
-    SELECT Year,Month,count(*) AS cnt
-    FROM ontime
-    WHERE DepDel15=1
-    GROUP BY Year,Month
-);
-
-SELECT avg(c1) FROM
-(
-    SELECT Year,Month,count(*) AS c1
-    FROM ontime
-    GROUP BY Year,Month
-);
-
-SELECT DestCityName, uniqExact(OriginCityName) AS u
-FROM ontime
-GROUP BY DestCityName
-ORDER BY u DESC
-LIMIT 10;
-
-SELECT OriginCityName, DestCityName, count() AS c
-FROM ontime
-GROUP BY OriginCityName, DestCityName
-ORDER BY c DESC
-LIMIT 10;
-
-SELECT OriginCityName, count() AS c
-FROM ontime
-GROUP BY OriginCityName
-ORDER BY c DESC
-LIMIT 10;
-```
-
-This performance test was created by Vadim Tkachenko.
Ver: - -- https://www.percona.com/blog/2009/10/02/analyzing-air-traffic-performance-with-infobright-and-monetdb/ -- https://www.percona.com/blog/2009/10/26/air-traffic-queries-in-luciddb/ -- https://www.percona.com/blog/2009/11/02/air-traffic-queries-in-infinidb-early-alpha/ -- https://www.percona.com/blog/2014/04/21/using-apache-hadoop-and-impala-together-with-mysql-for-data-analysis/ -- https://www.percona.com/blog/2016/01/07/apache-spark-with-air-ontime-performance-data/ -- http://nickmakos.blogspot.ru/2012/08/analyzing-air-traffic-performance-with.html - -[Artículo Original](https://clickhouse.tech/docs/en/getting_started/example_datasets/ontime/) diff --git a/docs/es/getting-started/example-datasets/star-schema.md b/docs/es/getting-started/example-datasets/star-schema.md deleted file mode 100644 index 43f878eb205..00000000000 --- a/docs/es/getting-started/example-datasets/star-schema.md +++ /dev/null @@ -1,370 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 20 -toc_title: Estrella Schema Benchmark ---- - -# Estrella Schema Benchmark {#star-schema-benchmark} - -Compilación de dbgen: - -``` bash -$ git clone git@github.com:vadimtk/ssb-dbgen.git -$ cd ssb-dbgen -$ make -``` - -Generación de datos: - -!!! warning "Atención" - Con `-s 100` dbgen genera 600 millones de filas (67 GB), mientras que `-s 1000` genera 6 mil millones de filas (lo que lleva mucho tiempo) - -``` bash -$ ./dbgen -s 1000 -T c -$ ./dbgen -s 1000 -T l -$ ./dbgen -s 1000 -T p -$ ./dbgen -s 1000 -T s -$ ./dbgen -s 1000 -T d -``` - -Creación de tablas en ClickHouse: - -``` sql -CREATE TABLE customer -( - C_CUSTKEY UInt32, - C_NAME String, - C_ADDRESS String, - C_CITY LowCardinality(String), - C_NATION LowCardinality(String), - C_REGION LowCardinality(String), - C_PHONE String, - C_MKTSEGMENT LowCardinality(String) -) -ENGINE = MergeTree ORDER BY (C_CUSTKEY); - -CREATE TABLE lineorder -( - LO_ORDERKEY UInt32, - LO_LINENUMBER UInt8, - LO_CUSTKEY UInt32, - LO_PARTKEY UInt32, - LO_SUPPKEY UInt32, - LO_ORDERDATE Date, - LO_ORDERPRIORITY LowCardinality(String), - LO_SHIPPRIORITY UInt8, - LO_QUANTITY UInt8, - LO_EXTENDEDPRICE UInt32, - LO_ORDTOTALPRICE UInt32, - LO_DISCOUNT UInt8, - LO_REVENUE UInt32, - LO_SUPPLYCOST UInt32, - LO_TAX UInt8, - LO_COMMITDATE Date, - LO_SHIPMODE LowCardinality(String) -) -ENGINE = MergeTree PARTITION BY toYear(LO_ORDERDATE) ORDER BY (LO_ORDERDATE, LO_ORDERKEY); - -CREATE TABLE part -( - P_PARTKEY UInt32, - P_NAME String, - P_MFGR LowCardinality(String), - P_CATEGORY LowCardinality(String), - P_BRAND LowCardinality(String), - P_COLOR LowCardinality(String), - P_TYPE LowCardinality(String), - P_SIZE UInt8, - P_CONTAINER LowCardinality(String) -) -ENGINE = MergeTree ORDER BY P_PARTKEY; - -CREATE TABLE supplier -( - S_SUPPKEY UInt32, - S_NAME String, - S_ADDRESS String, - S_CITY LowCardinality(String), - S_NATION LowCardinality(String), - S_REGION LowCardinality(String), - S_PHONE String -) -ENGINE = MergeTree ORDER BY S_SUPPKEY; -``` - -Insertar datos: - -``` bash -$ clickhouse-client --query "INSERT INTO customer FORMAT CSV" < customer.tbl -$ clickhouse-client --query "INSERT INTO part FORMAT CSV" < part.tbl -$ clickhouse-client --query "INSERT INTO supplier FORMAT CSV" < supplier.tbl -$ clickhouse-client --query "INSERT INTO lineorder FORMAT CSV" < lineorder.tbl -``` - -Conversión “star schema” a desnormalizado “flat schema”: - -``` sql -SET max_memory_usage = 20000000000; - -CREATE TABLE lineorder_flat -ENGINE = MergeTree 
-PARTITION BY toYear(LO_ORDERDATE)
-ORDER BY (LO_ORDERDATE, LO_ORDERKEY) AS
-SELECT
-    l.LO_ORDERKEY AS LO_ORDERKEY,
-    l.LO_LINENUMBER AS LO_LINENUMBER,
-    l.LO_CUSTKEY AS LO_CUSTKEY,
-    l.LO_PARTKEY AS LO_PARTKEY,
-    l.LO_SUPPKEY AS LO_SUPPKEY,
-    l.LO_ORDERDATE AS LO_ORDERDATE,
-    l.LO_ORDERPRIORITY AS LO_ORDERPRIORITY,
-    l.LO_SHIPPRIORITY AS LO_SHIPPRIORITY,
-    l.LO_QUANTITY AS LO_QUANTITY,
-    l.LO_EXTENDEDPRICE AS LO_EXTENDEDPRICE,
-    l.LO_ORDTOTALPRICE AS LO_ORDTOTALPRICE,
-    l.LO_DISCOUNT AS LO_DISCOUNT,
-    l.LO_REVENUE AS LO_REVENUE,
-    l.LO_SUPPLYCOST AS LO_SUPPLYCOST,
-    l.LO_TAX AS LO_TAX,
-    l.LO_COMMITDATE AS LO_COMMITDATE,
-    l.LO_SHIPMODE AS LO_SHIPMODE,
-    c.C_NAME AS C_NAME,
-    c.C_ADDRESS AS C_ADDRESS,
-    c.C_CITY AS C_CITY,
-    c.C_NATION AS C_NATION,
-    c.C_REGION AS C_REGION,
-    c.C_PHONE AS C_PHONE,
-    c.C_MKTSEGMENT AS C_MKTSEGMENT,
-    s.S_NAME AS S_NAME,
-    s.S_ADDRESS AS S_ADDRESS,
-    s.S_CITY AS S_CITY,
-    s.S_NATION AS S_NATION,
-    s.S_REGION AS S_REGION,
-    s.S_PHONE AS S_PHONE,
-    p.P_NAME AS P_NAME,
-    p.P_MFGR AS P_MFGR,
-    p.P_CATEGORY AS P_CATEGORY,
-    p.P_BRAND AS P_BRAND,
-    p.P_COLOR AS P_COLOR,
-    p.P_TYPE AS P_TYPE,
-    p.P_SIZE AS P_SIZE,
-    p.P_CONTAINER AS P_CONTAINER
-FROM lineorder AS l
-INNER JOIN customer AS c ON c.C_CUSTKEY = l.LO_CUSTKEY
-INNER JOIN supplier AS s ON s.S_SUPPKEY = l.LO_SUPPKEY
-INNER JOIN part AS p ON p.P_PARTKEY = l.LO_PARTKEY;
-```
-
-Running the queries:
-
-Q1.1
-
-``` sql
-SELECT sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue
-FROM lineorder_flat
-WHERE toYear(LO_ORDERDATE) = 1993 AND LO_DISCOUNT BETWEEN 1 AND 3 AND LO_QUANTITY < 25;
-```
-
-Q1.2
-
-``` sql
-SELECT sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue
-FROM lineorder_flat
-WHERE toYYYYMM(LO_ORDERDATE) = 199401 AND LO_DISCOUNT BETWEEN 4 AND 6 AND LO_QUANTITY BETWEEN 26 AND 35;
-```
-
-Q1.3
-
-``` sql
-SELECT sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue
-FROM lineorder_flat
-WHERE toISOWeek(LO_ORDERDATE) = 6 AND toYear(LO_ORDERDATE) = 1994
-    AND LO_DISCOUNT BETWEEN 5 AND 7 AND LO_QUANTITY BETWEEN 26 AND 35;
-```
-
-Q2.1
-
-``` sql
-SELECT
-    sum(LO_REVENUE),
-    toYear(LO_ORDERDATE) AS year,
-    P_BRAND
-FROM lineorder_flat
-WHERE P_CATEGORY = 'MFGR#12' AND S_REGION = 'AMERICA'
-GROUP BY
-    year,
-    P_BRAND
-ORDER BY
-    year,
-    P_BRAND;
-```
-
-Q2.2
-
-``` sql
-SELECT
-    sum(LO_REVENUE),
-    toYear(LO_ORDERDATE) AS year,
-    P_BRAND
-FROM lineorder_flat
-WHERE P_BRAND >= 'MFGR#2221' AND P_BRAND <= 'MFGR#2228' AND S_REGION = 'ASIA'
-GROUP BY
-    year,
-    P_BRAND
-ORDER BY
-    year,
-    P_BRAND;
-```
-
-Q2.3
-
-``` sql
-SELECT
-    sum(LO_REVENUE),
-    toYear(LO_ORDERDATE) AS year,
-    P_BRAND
-FROM lineorder_flat
-WHERE P_BRAND = 'MFGR#2239' AND S_REGION = 'EUROPE'
-GROUP BY
-    year,
-    P_BRAND
-ORDER BY
-    year,
-    P_BRAND;
-```
-
-Q3.1
-
-``` sql
-SELECT
-    C_NATION,
-    S_NATION,
-    toYear(LO_ORDERDATE) AS year,
-    sum(LO_REVENUE) AS revenue
-FROM lineorder_flat
-WHERE C_REGION = 'ASIA' AND S_REGION = 'ASIA' AND year >= 1992 AND year <= 1997
-GROUP BY
-    C_NATION,
-    S_NATION,
-    year
-ORDER BY
-    year ASC,
-    revenue DESC;
-```
-
-Q3.2
-
-``` sql
-SELECT
-    C_CITY,
-    S_CITY,
-    toYear(LO_ORDERDATE) AS year,
-    sum(LO_REVENUE) AS revenue
-FROM lineorder_flat
-WHERE C_NATION = 'UNITED STATES' AND S_NATION = 'UNITED STATES' AND year >= 1992 AND year <= 1997
-GROUP BY
-    C_CITY,
-    S_CITY,
-    year
-ORDER BY
-    year ASC,
-    revenue DESC;
-```
-
-Q3.3
-
-``` sql
-SELECT
-    C_CITY,
-    S_CITY,
-    toYear(LO_ORDERDATE) AS year,
-    sum(LO_REVENUE) AS revenue
-FROM lineorder_flat
-WHERE (C_CITY = 'UNITED KI1' OR C_CITY = 'UNITED KI5') AND (S_CITY = 'UNITED KI1' OR S_CITY = 'UNITED KI5') AND year >= 1992 AND year <= 1997
-GROUP BY
-    C_CITY,
-    S_CITY,
-    year
-ORDER BY
-    year ASC,
-    revenue DESC;
-```
-
-Q3.4
-
-``` sql
-SELECT
-    C_CITY,
-    S_CITY,
-    toYear(LO_ORDERDATE) AS year,
-    sum(LO_REVENUE) AS revenue
-FROM lineorder_flat
-WHERE (C_CITY = 'UNITED KI1' OR C_CITY = 'UNITED KI5') AND (S_CITY = 'UNITED KI1' OR S_CITY = 'UNITED KI5') AND toYYYYMM(LO_ORDERDATE) = 199712
-GROUP BY
-    C_CITY,
-    S_CITY,
-    year
-ORDER BY
-    year ASC,
-    revenue DESC;
-```
-
-Q4.1
-
-``` sql
-SELECT
-    toYear(LO_ORDERDATE) AS year,
-    C_NATION,
-    sum(LO_REVENUE - LO_SUPPLYCOST) AS profit
-FROM lineorder_flat
-WHERE C_REGION = 'AMERICA' AND S_REGION = 'AMERICA' AND (P_MFGR = 'MFGR#1' OR P_MFGR = 'MFGR#2')
-GROUP BY
-    year,
-    C_NATION
-ORDER BY
-    year ASC,
-    C_NATION ASC;
-```
-
-Q4.2
-
-``` sql
-SELECT
-    toYear(LO_ORDERDATE) AS year,
-    S_NATION,
-    P_CATEGORY,
-    sum(LO_REVENUE - LO_SUPPLYCOST) AS profit
-FROM lineorder_flat
-WHERE C_REGION = 'AMERICA' AND S_REGION = 'AMERICA' AND (year = 1997 OR year = 1998) AND (P_MFGR = 'MFGR#1' OR P_MFGR = 'MFGR#2')
-GROUP BY
-    year,
-    S_NATION,
-    P_CATEGORY
-ORDER BY
-    year ASC,
-    S_NATION ASC,
-    P_CATEGORY ASC;
-```
-
-Q4.3
-
-``` sql
-SELECT
-    toYear(LO_ORDERDATE) AS year,
-    S_CITY,
-    P_BRAND,
-    sum(LO_REVENUE - LO_SUPPLYCOST) AS profit
-FROM lineorder_flat
-WHERE S_NATION = 'UNITED STATES' AND (year = 1997 OR year = 1998) AND P_CATEGORY = 'MFGR#14'
-GROUP BY
-    year,
-    S_CITY,
-    P_BRAND
-ORDER BY
-    year ASC,
-    S_CITY ASC,
-    P_BRAND ASC;
-```
-
-[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/star_schema/)
diff --git a/docs/es/getting-started/example-datasets/wikistat.md b/docs/es/getting-started/example-datasets/wikistat.md
deleted file mode 100644
index 49d7263cdec..00000000000
--- a/docs/es/getting-started/example-datasets/wikistat.md
+++ /dev/null
@@ -1,35 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 18
-toc_title: WikiStat
----
-
-# WikiStat {#wikistat}
-
-See: http://dumps.wikimedia.org/other/pagecounts-raw/
-
-Creating a table:
-
-``` sql
-CREATE TABLE wikistat
-(
-    date Date,
-    time DateTime,
-    project String,
-    subproject String,
-    path String,
-    hits UInt64,
-    size UInt64
-) ENGINE = MergeTree(date, (path, time), 8192);
-```
-
-Loading data:
-
-``` bash
-$ for i in {2007..2016}; do for j in {01..12}; do echo $i-$j >&2; curl -sSL "http://dumps.wikimedia.org/other/pagecounts-raw/$i/$i-$j/" | grep -oE 'pagecounts-[0-9]+-[0-9]+\.gz'; done; done | sort | uniq | tee links.txt
-$ cat links.txt | while read link; do wget http://dumps.wikimedia.org/other/pagecounts-raw/$(echo $link | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})[0-9]{2}-[0-9]+\.gz/\1/')/$(echo $link | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})[0-9]{2}-[0-9]+\.gz/\1-\2/')/$link; done
-$ ls -1 /opt/wikistat/ | grep gz | while read i; do echo $i; gzip -cd /opt/wikistat/$i | ./wikistat-loader --time="$(echo -n $i | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})([0-9]{2})-([0-9]{2})([0-9]{2})([0-9]{2})\.gz/\1-\2-\3 \4-00-00/')" | clickhouse-client --query="INSERT INTO wikistat FORMAT TabSeparated"; done
-```
-
-[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/wikistat/)
diff --git a/docs/es/getting-started/index.md b/docs/es/getting-started/index.md
deleted file mode 100644
index 681c2017ac1..00000000000
--- a/docs/es/getting-started/index.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_folder_title: Getting Started
-toc_hidden: true
-toc_priority: 8
-toc_title: hidden
----
-
-# Getting Started {#getting-started}
-
-If you are new to ClickHouse and want to get a hands-on feeling of its performance, first of all, you need to go through the [installation process](install.md). After that you can:
-
-- [Go through the detailed tutorial](tutorial.md)
-- [Experiment with example datasets](example-datasets/ontime.md)
-
-[Original article](https://clickhouse.tech/docs/en/getting_started/)
diff --git a/docs/es/getting-started/install.md b/docs/es/getting-started/install.md
deleted file mode 100644
index 092ef47b2f7..00000000000
--- a/docs/es/getting-started/install.md
+++ /dev/null
@@ -1,182 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 11
-toc_title: Installation
----
-
-# Installation {#installation}
-
-## System Requirements {#system-requirements}
-
-ClickHouse can run on any Linux, FreeBSD, or Mac OS X with x86_64, AArch64, or PowerPC64LE CPU architecture.
-
-Official pre-built binaries are typically compiled for x86_64 and leverage the SSE 4.2 instruction set, so unless otherwise stated, usage of a CPU that supports it becomes an additional system requirement. Here is the command to check whether the current CPU has support for SSE 4.2:
-
-``` bash
-$ grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not supported"
-```
-
-To run ClickHouse on processors that do not support SSE 4.2 or have AArch64 or PowerPC64LE architecture, you should [build ClickHouse from sources](#from-sources) with proper configuration adjustments.
-
-## Available Installation Options {#available-installation-options}
-
-### From DEB Packages {#install-from-deb-packages}
-
-It is recommended to use the official pre-compiled `deb` packages for Debian or Ubuntu. Run these commands to install the packages:
-
-``` bash
-{% include 'install/deb.sh' %}
-```
-
-If you want to use the most recent version, replace `stable` with `testing` (this is recommended for your testing environments).
-
-You can also download and install packages manually from [here](https://repo.clickhouse.tech/deb/stable/main/).
-
-#### Packages {#packages}
-
-- `clickhouse-common-static` — Installs ClickHouse compiled binary files.
-- `clickhouse-server` — Creates a symbolic link for `clickhouse-server` and installs the default server configuration.
-- `clickhouse-client` — Creates a symbolic link for `clickhouse-client` and other client-related tools, and installs the client configuration files.
-- `clickhouse-common-static-dbg` — Installs ClickHouse compiled binary files with debug info.
-
-### From RPM Packages {#from-rpm-packages}
-
-It is recommended to use the official pre-compiled `rpm` packages for CentOS, RedHat, and all other rpm-based Linux distributions.
-
-First, you need to add the official repository:
-
-``` bash
-sudo yum install yum-utils
-sudo rpm --import https://repo.clickhouse.tech/CLICKHOUSE-KEY.GPG
-sudo yum-config-manager --add-repo https://repo.clickhouse.tech/rpm/stable/x86_64
-```
-
-If you want to use the most recent version, replace `stable` with `testing` (this is recommended for your testing environments).
-The `prestable` tag is sometimes available too.
-
-Then run these commands to install the packages:
-
-``` bash
-sudo yum install clickhouse-server clickhouse-client
-```
-
-You can also download and install packages manually from [here](https://repo.clickhouse.tech/rpm/stable/x86_64).
-
-### From Tgz Archives {#from-tgz-archives}
-
-It is recommended to use the official pre-compiled `tgz` archives for all Linux distributions where installation of `deb` or `rpm` packages is not possible.
-
-The required version can be downloaded with `curl` or `wget` from the repository https://repo.clickhouse.tech/tgz/.
-After that, the downloaded archives should be unpacked and installed with the installation scripts. Example for the latest version:
-
-``` bash
-export LATEST_VERSION=`curl https://api.github.com/repos/ClickHouse/ClickHouse/tags 2>/dev/null | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -n 1`
-curl -O https://repo.clickhouse.tech/tgz/clickhouse-common-static-$LATEST_VERSION.tgz
-curl -O https://repo.clickhouse.tech/tgz/clickhouse-common-static-dbg-$LATEST_VERSION.tgz
-curl -O https://repo.clickhouse.tech/tgz/clickhouse-server-$LATEST_VERSION.tgz
-curl -O https://repo.clickhouse.tech/tgz/clickhouse-client-$LATEST_VERSION.tgz
-
-tar -xzvf clickhouse-common-static-$LATEST_VERSION.tgz
-sudo clickhouse-common-static-$LATEST_VERSION/install/doinst.sh
-
-tar -xzvf clickhouse-common-static-dbg-$LATEST_VERSION.tgz
-sudo clickhouse-common-static-dbg-$LATEST_VERSION/install/doinst.sh
-
-tar -xzvf clickhouse-server-$LATEST_VERSION.tgz
-sudo clickhouse-server-$LATEST_VERSION/install/doinst.sh
-sudo /etc/init.d/clickhouse-server start
-
-tar -xzvf clickhouse-client-$LATEST_VERSION.tgz
-sudo clickhouse-client-$LATEST_VERSION/install/doinst.sh
-```
-
-For production environments, it is recommended to use the latest `stable` version. You can find its number on the GitHub page https://github.com/ClickHouse/ClickHouse/tags with the postfix `-stable`.
-
-### From Docker Image {#from-docker-image}
-
-To run ClickHouse inside Docker, follow the guide on [Docker Hub](https://hub.docker.com/r/yandex/clickhouse-server/). Those images use the official `deb` packages inside.
-
-### From Sources {#from-sources}
-
-To manually compile ClickHouse, follow the instructions for [Linux](../development/build.md) or [Mac OS X](../development/build-osx.md).
-
-You can compile packages and install them, or use programs without installing packages. Also, by building manually, you can disable the SSE 4.2 requirement or build for AArch64 CPUs.
-
-      Client: programs/clickhouse-client
-      Server: programs/clickhouse-server
-
-You will need to create data and metadata folders and `chown` them for the desired user. Their paths can be changed in the server config (src/programs/server/config.xml); by default they are:
-
-      /opt/clickhouse/data/default/
-      /opt/clickhouse/metadata/default/
-
-On Gentoo, you can just use `emerge clickhouse` to install ClickHouse from sources.
-
-## Launch {#launch}
-
-To start the server as a daemon, run:
-
-``` bash
-$ sudo service clickhouse-server start
-```
-
-If you do not have the `service` command, run as
-
-``` bash
-$ sudo /etc/init.d/clickhouse-server start
-```
-
-See the logs in the `/var/log/clickhouse-server/` directory.
-
-If the server does not start, check the configuration in the file `/etc/clickhouse-server/config.xml`.
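-
-Once the server does accept connections, a quick sanity check from any client session confirms which build is actually running. A minimal sketch using the built-in `version()` and `uptime()` functions (output will, of course, vary per installation):
-
-``` sql
--- returns the server version string and the number of seconds since startup
-SELECT version(), uptime();
-```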
-
-You can also launch the server manually from the console:
-
-``` bash
-$ clickhouse-server --config-file=/etc/clickhouse-server/config.xml
-```
-
-In this case, the log is printed to the console, which is convenient during development.
-If the configuration file is in the current directory, you do not need to specify the `--config-file` parameter. By default, it uses `./config.xml`.
-
-ClickHouse supports access restriction settings. They are located in the `users.xml` file (next to `config.xml`).
-By default, access is allowed from anywhere for the `default` user, without a password. See `user/default/networks`.
-For more information, see the section [“Configuration Files”](../operations/configuration-files.md).
-
-After launching the server, you can use the command-line client to connect to it:
-
-``` bash
-$ clickhouse-client
-```
-
-By default, it connects to `localhost:9000` on behalf of the user `default` without a password. It can also be used to connect to a remote server using the `--host` argument.
-
-The terminal must use UTF-8 encoding.
-For more information, see the section [“Command-line client”](../interfaces/cli.md).
-
-Example:
-
-``` bash
-$ ./clickhouse-client
-ClickHouse client version 0.0.18749.
-Connecting to localhost:9000.
-Connected to ClickHouse server version 0.0.18749.
-
-:) SELECT 1
-
-SELECT 1
-
-┌─1─┐
-│ 1 │
-└───┘
-
-1 rows in set. Elapsed: 0.003 sec.
-
-:)
-```
-
-**Congratulations, the system works!**
-
-To continue experimenting, you can download one of the test datasets or go through the [tutorial](https://clickhouse.tech/tutorial.html).
-
-[Original article](https://clickhouse.tech/docs/en/getting_started/install/)
diff --git a/docs/es/getting-started/playground.md b/docs/es/getting-started/playground.md
deleted file mode 100644
index 1ab7246e2d4..00000000000
--- a/docs/es/getting-started/playground.md
+++ /dev/null
@@ -1,48 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 14
-toc_title: Playground
----
-
-# ClickHouse Playground {#clickhouse-playground}
-
-[ClickHouse Playground](https://play.clickhouse.tech?file=welcome) allows people to experiment with ClickHouse by running queries instantly, without setting up their own server or cluster.
-Several example datasets are available in Playground, as well as sample queries that show off ClickHouse features.
-
-Queries are executed as a read-only user. This implies some limitations:
-
-- DDL queries are not allowed
-- INSERT queries are not allowed
-
-The following settings are also enforced (a sketch showing their effect follows below):
-
-- [`max_result_bytes=10485760`](../operations/settings/query_complexity/#max-result-bytes)
-- [`max_result_rows=2000`](../operations/settings/query_complexity/#setting-max_result_rows)
-- [`result_overflow_mode=break`](../operations/settings/query_complexity/#result-overflow-mode)
-- [`max_execution_time=60000`](../operations/settings/query_complexity/#max-execution-time)
-
-ClickHouse Playground gives the experience of an m2.small
-[Managed Service for ClickHouse](https://cloud.yandex.com/services/managed-clickhouse)
-instance hosted in [Yandex.Cloud](https://cloud.yandex.com/).
-More information about [cloud providers](../commercial/cloud.md).
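-
-The effect of these limits is easy to observe. The following is only a sketch (the `system.numbers` table is a standard virtual table, but the exact truncation point depends on block sizes):
-
-``` sql
--- asks for 3000 rows; with max_result_rows = 2000 and
--- result_overflow_mode = 'break', the result is cut off
--- around the 2000-row mark instead of raising an error
-SELECT number
-FROM system.numbers
-LIMIT 3000;
-```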
-
-The ClickHouse Playground web interface makes requests via the ClickHouse [HTTP API](../interfaces/http.md).
-The Playground backend is just a ClickHouse cluster without any additional server-side application.
-The ClickHouse HTTPS endpoint is also available as a part of the Playground.
-
-You can make queries to the Playground using any HTTP client, for example [curl](https://curl.haxx.se) or [wget](https://www.gnu.org/software/wget/), or set up a connection using a [JDBC](../interfaces/jdbc.md) or [ODBC](../interfaces/odbc.md) driver.
-More information about software products that support ClickHouse is available [here](../interfaces/index.md).
-
-| Parameter | Value                                 |
-|:----------|:--------------------------------------|
-| Endpoint  | https://play-api.clickhouse.tech:8443 |
-| User      | `playground`                          |
-| Password  | `clickhouse`                          |
-
-Note that this endpoint requires a secure connection.
-
-Example:
-
-``` bash
-curl "https://play-api.clickhouse.tech:8443/?query=SELECT+'Play+ClickHouse!';&user=playground&password=clickhouse&database=datasets"
-```
diff --git a/docs/es/getting-started/tutorial.md b/docs/es/getting-started/tutorial.md
deleted file mode 100644
index 2cc9339f954..00000000000
--- a/docs/es/getting-started/tutorial.md
+++ /dev/null
@@ -1,664 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 12
-toc_title: Tutorial
----
-
-# ClickHouse Tutorial {#clickhouse-tutorial}
-
-## What to Expect from This Tutorial? {#what-to-expect-from-this-tutorial}
-
-By going through this tutorial, you will learn how to set up a simple ClickHouse cluster. It will be small, but fault-tolerant and scalable. Then we will use one of the example datasets to fill it with data and execute some demo queries.
-
-## Single Node Setup {#single-node-setup}
-
-To postpone the complexities of a distributed environment, we will start with deploying ClickHouse on a single server or virtual machine. ClickHouse is usually installed from [deb](install.md#install-from-deb-packages) or [rpm](install.md#from-rpm-packages) packages, but there are [alternatives](install.md#from-docker-image) for the operating systems that do not support them.
-
-For example, you have chosen `deb` packages and executed:
-
-``` bash
-{% include 'install/deb.sh' %}
-```
-
-What do we have in the packages that got installed:
-
-- the `clickhouse-client` package contains the [clickhouse-client](../interfaces/cli.md) application, an interactive ClickHouse console client.
-- the `clickhouse-common` package contains the ClickHouse executable file.
-- the `clickhouse-server` package contains configuration files to run ClickHouse as a server.
-
-Server configuration files are located in `/etc/clickhouse-server/`. Before going further, please notice the `<path>` element in `config.xml`. The path determines the location for data storage, so it should be located on a volume with large disk capacity; the default value is `/var/lib/clickhouse/`. If you want to adjust the configuration, it is not handy to directly edit the `config.xml` file, considering it might get rewritten on future package updates. The recommended way to override the config elements is to create [files in the config.d directory](../operations/configuration-files.md) which serve as “patches” to config.xml.
-
-As you might have noticed, `clickhouse-server` is not launched automatically after package installation. It will not be automatically restarted after updates, either. The way you start the server depends on your init system; usually, it is:
-
-``` bash
-sudo service clickhouse-server start
-```
-
-or
-
-``` bash
-sudo /etc/init.d/clickhouse-server start
-```
-
-The default location for server logs is `/var/log/clickhouse-server/`. The server is ready to handle client connections once it logs the `Ready for connections` message.
-
-Once the `clickhouse-server` is up and running, we can use `clickhouse-client` to connect to the server and run some test queries like `SELECT "Hello, world!";`.
-
-<details markdown="1">
-
-<summary>Quick tips for clickhouse-client</summary>
-
-Interactive mode:
-
-``` bash
-clickhouse-client
-clickhouse-client --host=... --port=... --user=... --password=...
-```
-
-Enable multiline queries:
-
-``` bash
-clickhouse-client -m
-clickhouse-client --multiline
-```
-
-Run queries in batch mode:
-
-``` bash
-clickhouse-client --query='SELECT 1'
-echo 'SELECT 1' | clickhouse-client
-clickhouse-client <<< 'SELECT 1'
-```
-
-Insert data from a file in the specified format:
-
-``` bash
-clickhouse-client --query='INSERT INTO table VALUES' < data.txt
-clickhouse-client --query='INSERT INTO table FORMAT TabSeparated' < data.tsv
-```
-
-</details>
-
-## Import Sample Dataset {#import-sample-dataset}
-
-Now it is time to fill our ClickHouse server with some sample data. In this tutorial, we will use the anonymized data of Yandex.Metrica, the first service that ran ClickHouse in production before it became open source (more on that in the [history section](../introduction/history.md)). There are [multiple ways to import the Yandex.Metrica dataset](example-datasets/metrica.md), and for the sake of the tutorial, we will go with the most realistic one.
-
-### Download and Extract Table Data {#download-and-extract-table-data}
-
-``` bash
-curl https://datasets.clickhouse.tech/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv
-curl https://datasets.clickhouse.tech/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv
-```
-
-The extracted files are about 10 GB in size.
-
-### Create Tables {#create-tables}
-
-As in most database management systems, ClickHouse logically groups tables into “databases”. There is a `default` database, but we will create a new one named `tutorial`:
-
-``` bash
-clickhouse-client --query "CREATE DATABASE IF NOT EXISTS tutorial"
-```
-
-The syntax for creating tables is way more complicated compared to databases (see the [reference](../sql-reference/statements/create.md)). In general, a `CREATE TABLE` statement has to specify three key things:
-
-1. The name of the table to create.
-2. The table schema, i.e. the list of columns and their [data types](../sql-reference/data-types/index.md).
-3. The [table engine](../engines/table-engines/index.md) and its settings, which determine all the details on how queries to this table will be physically executed.
-
-Yandex.Metrica is a web analytics service, and the sample dataset does not cover its full functionality, so there are only two tables to create:
-
-- `hits` is a table with each action done by all users on all websites covered by the service.
-- `visits` is a table that contains pre-built sessions instead of individual actions.
-
-Let's see and execute the real create table queries for these tables:
-
-``` sql
-CREATE TABLE tutorial.hits_v1
-(
-    `WatchID` UInt64,
-    `JavaEnable` UInt8,
-    `Title` String,
-    `GoodEvent` Int16,
-    `EventTime` DateTime,
-    `EventDate` Date,
-    `CounterID` UInt32,
-    `ClientIP` UInt32,
-    `ClientIP6` FixedString(16),
-    `RegionID` UInt32,
-    `UserID` UInt64,
-    `CounterClass` Int8,
-    `OS` UInt8,
-    `UserAgent` UInt8,
-    `URL` String,
-    `Referer` String,
-    `URLDomain` String,
-    `RefererDomain` String,
-    `Refresh` UInt8,
-    `IsRobot` UInt8,
-    `RefererCategories` Array(UInt16),
-    `URLCategories` Array(UInt16),
-    `URLRegions` Array(UInt32),
-    `RefererRegions` Array(UInt32),
-    `ResolutionWidth` UInt16,
-    `ResolutionHeight` UInt16,
-    `ResolutionDepth` UInt8,
-    `FlashMajor` UInt8,
-    `FlashMinor` UInt8,
-    `FlashMinor2` String,
-    `NetMajor` UInt8,
-    `NetMinor` UInt8,
-    `UserAgentMajor` UInt16,
-    `UserAgentMinor` FixedString(2),
-    `CookieEnable` UInt8,
-    `JavascriptEnable` UInt8,
-    `IsMobile` UInt8,
-    `MobilePhone` UInt8,
-    `MobilePhoneModel` String,
-    `Params` String,
-    `IPNetworkID` UInt32,
-    `TraficSourceID` Int8,
-    `SearchEngineID` UInt16,
-    `SearchPhrase` String,
-    `AdvEngineID` UInt8,
-    `IsArtifical` UInt8,
-    `WindowClientWidth` UInt16,
-    `WindowClientHeight` UInt16,
-    `ClientTimeZone` Int16,
-    `ClientEventTime` DateTime,
-    `SilverlightVersion1` UInt8,
-    `SilverlightVersion2` UInt8,
-    `SilverlightVersion3` UInt32,
-    `SilverlightVersion4` UInt16,
-    `PageCharset` String,
-    `CodeVersion` UInt32,
-    `IsLink` UInt8,
-    `IsDownload` UInt8,
-    `IsNotBounce` UInt8,
-    `FUniqID` UInt64,
-    `HID` UInt32,
-    `IsOldCounter` UInt8,
-    `IsEvent` UInt8,
-    `IsParameter` UInt8,
-    `DontCountHits` UInt8,
-    `WithHash` UInt8,
-    `HitColor` FixedString(1),
-    `UTCEventTime` DateTime,
-    `Age` UInt8,
-    `Sex` UInt8,
-    `Income` UInt8,
-    `Interests` UInt16,
-    `Robotness` UInt8,
-    `GeneralInterests` Array(UInt16),
-    `RemoteIP` UInt32,
-    `RemoteIP6` FixedString(16),
-    `WindowName` Int32,
-    `OpenerName` Int32,
-    `HistoryLength` Int16,
-    `BrowserLanguage` FixedString(2),
-    `BrowserCountry` FixedString(2),
-    `SocialNetwork` String,
-    `SocialAction` String,
-    `HTTPError` UInt16,
-    `SendTiming` Int32,
-    `DNSTiming` Int32,
-    `ConnectTiming` Int32,
-    `ResponseStartTiming` Int32,
-    `ResponseEndTiming` Int32,
-    `FetchTiming` Int32,
-    `RedirectTiming` Int32,
-    `DOMInteractiveTiming` Int32,
-    `DOMContentLoadedTiming` Int32,
-    `DOMCompleteTiming` Int32,
-    `LoadEventStartTiming` Int32,
-    `LoadEventEndTiming` Int32,
-    `NSToDOMContentLoadedTiming` Int32,
-    `FirstPaintTiming` Int32,
-    `RedirectCount` Int8,
-    `SocialSourceNetworkID` UInt8,
-    `SocialSourcePage` String,
-    `ParamPrice` Int64,
-    `ParamOrderID` String,
-    `ParamCurrency` FixedString(3),
-    `ParamCurrencyID` UInt16,
-    `GoalsReached` Array(UInt32),
-    `OpenstatServiceName` String,
-    `OpenstatCampaignID` String,
-    `OpenstatAdID` String,
-    `OpenstatSourceID` String,
-    `UTMSource` String,
-    `UTMMedium` String,
-    `UTMCampaign` String,
-    `UTMContent` String,
-    `UTMTerm` String,
-    `FromTag` String,
-    `HasGCLID` UInt8,
-    `RefererHash` UInt64,
-    `URLHash` UInt64,
-    `CLID` UInt32,
-    `YCLID` UInt64,
-    `ShareService` String,
-    `ShareURL` String,
-    `ShareTitle` String,
-    `ParsedParams` Nested(
-        Key1 String,
-        Key2 String,
-        Key3 String,
-        Key4 String,
-        Key5 String,
-        ValueDouble Float64),
-    `IslandID` FixedString(16),
-    `RequestNum` UInt32,
-    `RequestTry` UInt8
-)
-ENGINE = MergeTree()
-PARTITION BY toYYYYMM(EventDate)
-ORDER BY (CounterID, EventDate, intHash32(UserID))
-SAMPLE BY intHash32(UserID)
-```
-
-``` sql
-CREATE TABLE tutorial.visits_v1
-(
-    `CounterID` UInt32,
-    `StartDate` Date,
-    `Sign` Int8,
-    `IsNew` UInt8,
-    `VisitID` UInt64,
-    `UserID` UInt64,
-    `StartTime` DateTime,
-    `Duration` UInt32,
-    `UTCStartTime` DateTime,
-    `PageViews` Int32,
-    `Hits` Int32,
-    `IsBounce` UInt8,
-    `Referer` String,
-    `StartURL` String,
-    `RefererDomain` String,
-    `StartURLDomain` String,
-    `EndURL` String,
-    `LinkURL` String,
-    `IsDownload` UInt8,
-    `TraficSourceID` Int8,
-    `SearchEngineID` UInt16,
-    `SearchPhrase` String,
-    `AdvEngineID` UInt8,
-    `PlaceID` Int32,
-    `RefererCategories` Array(UInt16),
-    `URLCategories` Array(UInt16),
-    `URLRegions` Array(UInt32),
-    `RefererRegions` Array(UInt32),
-    `IsYandex` UInt8,
-    `GoalReachesDepth` Int32,
-    `GoalReachesURL` Int32,
-    `GoalReachesAny` Int32,
-    `SocialSourceNetworkID` UInt8,
-    `SocialSourcePage` String,
-    `MobilePhoneModel` String,
-    `ClientEventTime` DateTime,
-    `RegionID` UInt32,
-    `ClientIP` UInt32,
-    `ClientIP6` FixedString(16),
-    `RemoteIP` UInt32,
-    `RemoteIP6` FixedString(16),
-    `IPNetworkID` UInt32,
-    `SilverlightVersion3` UInt32,
-    `CodeVersion` UInt32,
-    `ResolutionWidth` UInt16,
-    `ResolutionHeight` UInt16,
-    `UserAgentMajor` UInt16,
-    `UserAgentMinor` UInt16,
-    `WindowClientWidth` UInt16,
-    `WindowClientHeight` UInt16,
-    `SilverlightVersion2` UInt8,
-    `SilverlightVersion4` UInt16,
-    `FlashVersion3` UInt16,
-    `FlashVersion4` UInt16,
-    `ClientTimeZone` Int16,
-    `OS` UInt8,
-    `UserAgent` UInt8,
-    `ResolutionDepth` UInt8,
-    `FlashMajor` UInt8,
-    `FlashMinor` UInt8,
-    `NetMajor` UInt8,
-    `NetMinor` UInt8,
-    `MobilePhone` UInt8,
-    `SilverlightVersion1` UInt8,
-    `Age` UInt8,
-    `Sex` UInt8,
-    `Income` UInt8,
-    `JavaEnable` UInt8,
-    `CookieEnable` UInt8,
-    `JavascriptEnable` UInt8,
-    `IsMobile` UInt8,
-    `BrowserLanguage` UInt16,
-    `BrowserCountry` UInt16,
-    `Interests` UInt16,
-    `Robotness` UInt8,
-    `GeneralInterests` Array(UInt16),
-    `Params` Array(String),
-    `Goals` Nested(
-        ID UInt32,
-        Serial UInt32,
-        EventTime DateTime,
-        Price Int64,
-        OrderID String,
-        CurrencyID UInt32),
-    `WatchIDs` Array(UInt64),
-    `ParamSumPrice` Int64,
-    `ParamCurrency` FixedString(3),
-    `ParamCurrencyID` UInt16,
-    `ClickLogID` UInt64,
-    `ClickEventID` Int32,
-    `ClickGoodEvent` Int32,
-    `ClickEventTime` DateTime,
-    `ClickPriorityID` Int32,
-    `ClickPhraseID` Int32,
-    `ClickPageID` Int32,
-    `ClickPlaceID` Int32,
-    `ClickTypeID` Int32,
-    `ClickResourceID` Int32,
-    `ClickCost` UInt32,
-    `ClickClientIP` UInt32,
-    `ClickDomainID` UInt32,
-    `ClickURL` String,
-    `ClickAttempt` UInt8,
-    `ClickOrderID` UInt32,
-    `ClickBannerID` UInt32,
-    `ClickMarketCategoryID` UInt32,
-    `ClickMarketPP` UInt32,
-    `ClickMarketCategoryName` String,
-    `ClickMarketPPName` String,
-    `ClickAWAPSCampaignName` String,
-    `ClickPageName` String,
-    `ClickTargetType` UInt16,
-    `ClickTargetPhraseID` UInt64,
-    `ClickContextType` UInt8,
-    `ClickSelectType` Int8,
-    `ClickOptions` String,
-    `ClickGroupBannerID` Int32,
-    `OpenstatServiceName` String,
-    `OpenstatCampaignID` String,
-    `OpenstatAdID` String,
-    `OpenstatSourceID` String,
-    `UTMSource` String,
-    `UTMMedium` String,
-    `UTMCampaign` String,
-    `UTMContent` String,
-    `UTMTerm` String,
-    `FromTag` String,
-    `HasGCLID` UInt8,
-    `FirstVisit` DateTime,
-    `PredLastVisit` Date,
-    `LastVisit` Date,
-    `TotalVisits` UInt32,
-    `TraficSource` Nested(
-        ID Int8,
-        SearchEngineID UInt16,
-        AdvEngineID UInt8,
-        PlaceID UInt16,
-        SocialSourceNetworkID UInt8,
-        Domain String,
-        SearchPhrase String,
-        SocialSourcePage String),
-    `Attendance` FixedString(16),
-    `CLID` UInt32,
-    `YCLID` UInt64,
-    `NormalizedRefererHash` UInt64,
-    `SearchPhraseHash` UInt64,
-    `RefererDomainHash` UInt64,
-    `NormalizedStartURLHash` UInt64,
-    `StartURLDomainHash` UInt64,
-    `NormalizedEndURLHash` UInt64,
-    `TopLevelDomain` UInt64,
-    `URLScheme` UInt64,
-    `OpenstatServiceNameHash` UInt64,
-    `OpenstatCampaignIDHash` UInt64,
-    `OpenstatAdIDHash` UInt64,
-    `OpenstatSourceIDHash` UInt64,
-    `UTMSourceHash` UInt64,
-    `UTMMediumHash` UInt64,
-    `UTMCampaignHash` UInt64,
-    `UTMContentHash` UInt64,
-    `UTMTermHash` UInt64,
-    `FromHash` UInt64,
-    `WebVisorEnabled` UInt8,
-    `WebVisorActivity` UInt32,
-    `ParsedParams` Nested(
-        Key1 String,
-        Key2 String,
-        Key3 String,
-        Key4 String,
-        Key5 String,
-        ValueDouble Float64),
-    `Market` Nested(
-        Type UInt8,
-        GoalID UInt32,
-        OrderID String,
-        OrderPrice Int64,
-        PP UInt32,
-        DirectPlaceID UInt32,
-        DirectOrderID UInt32,
-        DirectBannerID UInt32,
-        GoodID String,
-        GoodName String,
-        GoodQuantity Int32,
-        GoodPrice Int64),
-    `IslandID` FixedString(16)
-)
-ENGINE = CollapsingMergeTree(Sign)
-PARTITION BY toYYYYMM(StartDate)
-ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
-SAMPLE BY intHash32(UserID)
-```
-
-You can execute those queries using the interactive mode of `clickhouse-client` (just launch it in a terminal without specifying a query in advance) or try some [alternative interface](../interfaces/index.md) if you want.
-
-As we can see, `hits_v1` uses the [basic MergeTree engine](../engines/table-engines/mergetree-family/mergetree.md), while `visits_v1` uses the [CollapsingMergeTree](../engines/table-engines/mergetree-family/collapsingmergetree.md) variant.
-
-### Import Data {#import-data}
-
-Data import into ClickHouse is done via an [INSERT INTO](../sql-reference/statements/insert-into.md) query, as in many other SQL databases. However, data is usually provided in one of the [supported serialization formats](../interfaces/formats.md) instead of a `VALUES` clause (which is also supported).
-
-The files we downloaded earlier are in tab-separated format, so here is how to import them via the console client:
-
-``` bash
-clickhouse-client --query "INSERT INTO tutorial.hits_v1 FORMAT TSV" --max_insert_block_size=100000 < hits_v1.tsv
-clickhouse-client --query "INSERT INTO tutorial.visits_v1 FORMAT TSV" --max_insert_block_size=100000 < visits_v1.tsv
-```
-
-ClickHouse has a lot of [settings to tune](../operations/settings/index.md), and one way to specify them in the console client is via arguments, as we can see with `--max_insert_block_size`. The easiest way to figure out what settings are available, what they mean, and what the defaults are is to query the `system.settings` table:
-
-``` sql
-SELECT name, value, changed, description
-FROM system.settings
-WHERE name LIKE '%max_insert_b%'
-FORMAT TSV
-
-max_insert_block_size    1048576    0    "The maximum block size for insertion, if we control the creation of blocks for insertion."
-```
-
-Optionally, you can [OPTIMIZE](../sql-reference/statements/misc.md#misc_operations-optimize) the tables after import. Tables that are configured with an engine from the MergeTree family always do merges of data parts in the background to optimize data storage (or at least check if it makes sense).
-These queries force the table engine to do storage optimization right now instead of some time later:
-
-``` bash
-clickhouse-client --query "OPTIMIZE TABLE tutorial.hits_v1 FINAL"
-clickhouse-client --query "OPTIMIZE TABLE tutorial.visits_v1 FINAL"
-```
-
-These queries start an I/O- and CPU-intensive operation, so if the table consistently receives new data, it is better to leave it alone and let merges run in the background.
-
-Now we can check if the table import was successful:
-
-``` bash
-clickhouse-client --query "SELECT COUNT(*) FROM tutorial.hits_v1"
-clickhouse-client --query "SELECT COUNT(*) FROM tutorial.visits_v1"
-```
-
-## Example Queries {#example-queries}
-
-``` sql
-SELECT
-    StartURL AS URL,
-    AVG(Duration) AS AvgDuration
-FROM tutorial.visits_v1
-WHERE StartDate BETWEEN '2014-03-23' AND '2014-03-30'
-GROUP BY URL
-ORDER BY AvgDuration DESC
-LIMIT 10
-```
-
-``` sql
-SELECT
-    sum(Sign) AS visits,
-    sumIf(Sign, has(Goals.ID, 1105530)) AS goal_visits,
-    (100. * goal_visits) / visits AS goal_percent
-FROM tutorial.visits_v1
-WHERE (CounterID = 912887) AND (toYYYYMM(StartDate) = 201403) AND (domain(StartURL) = 'yandex.ru')
-```
-
-## Cluster Deployment {#cluster-deployment}
-
-A ClickHouse cluster is a homogeneous cluster. Steps to set up:
-
-1. Install the ClickHouse server on all machines of the cluster
-2. Set up cluster configs in the configuration files
-3. Create local tables on each instance
-4. Create a [Distributed table](../engines/table-engines/special/distributed.md)
-
-A [Distributed table](../engines/table-engines/special/distributed.md) is actually a kind of “view” to the local tables of a ClickHouse cluster. A SELECT query from a distributed table executes using resources of all cluster shards. You may specify configs for multiple clusters and create multiple distributed tables providing views to different clusters.
-
-Example config for a cluster with three shards, one replica each:
-
-``` xml
-<remote_servers>
-    <perftest_3shards_1replicas>
-        <shard>
-            <replica>
-                <host>example-perftest01j.yandex.ru</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-        <shard>
-            <replica>
-                <host>example-perftest02j.yandex.ru</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-        <shard>
-            <replica>
-                <host>example-perftest03j.yandex.ru</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-    </perftest_3shards_1replicas>
-</remote_servers>
-```
-
-For further demonstration, let's create a new local table with the same `CREATE TABLE` query that we used for `hits_v1`, but a different table name:
-
-``` sql
-CREATE TABLE tutorial.hits_local (...) ENGINE = MergeTree() ...
-```
-
-Creating a distributed table providing a view into the local tables of the cluster:
-
-``` sql
-CREATE TABLE tutorial.hits_all AS tutorial.hits_local
-ENGINE = Distributed(perftest_3shards_1replicas, tutorial, hits_local, rand());
-```
-
-A common practice is to create similar distributed tables on all machines of the cluster. It allows running distributed queries on any machine of the cluster. There is also an alternative option to create a temporary distributed table for a given SELECT query using the [remote](../sql-reference/table-functions/remote.md) table function.
-
-Let's run [INSERT SELECT](../sql-reference/statements/insert-into.md) into the Distributed table to spread the table to multiple servers.
-
-``` sql
-INSERT INTO tutorial.hits_all SELECT * FROM tutorial.hits_v1;
-```
-
-!!! warning "Notice"
-    This approach is not suitable for the sharding of large tables.
-    There is a separate tool, [clickhouse-copier](../operations/utilities/clickhouse-copier.md), that can re-shard arbitrary large tables.
-
-As you could expect, computationally heavy queries run N times faster if they utilize 3 servers instead of one.
-
-In this case, we have used a cluster with 3 shards, each containing a single replica.
-
-To provide resilience in a production environment, we recommend that each shard contain 2-3 replicas spread between multiple availability zones or datacenters (or at least racks). Note that ClickHouse supports an unlimited number of replicas.
-
-Example config for a cluster of one shard containing three replicas:
-
-``` xml
-<remote_servers>
-    ...
-    <perftest_1shards_3replicas>
-        <shard>
-            <replica>
-                <host>example-perftest01j.yandex.ru</host>
-                <port>9000</port>
-            </replica>
-            <replica>
-                <host>example-perftest02j.yandex.ru</host>
-                <port>9000</port>
-            </replica>
-            <replica>
-                <host>example-perftest03j.yandex.ru</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-    </perftest_1shards_3replicas>
-</remote_servers>
-```
-
-To enable native replication, [ZooKeeper](http://zookeeper.apache.org/) is required. ClickHouse takes care of data consistency on all replicas and runs the restore procedure after failure automatically. It is recommended to deploy the ZooKeeper cluster on separate servers (where no other processes, including ClickHouse, are running).
-
-!!! note "Note"
-    ZooKeeper is not a strict requirement: in some simple cases, you can duplicate the data by writing it into all the replicas from your application code. This approach is **not** recommended; in this case, ClickHouse will not be able to guarantee data consistency on all replicas. Thus, it becomes the responsibility of your application.
-
-ZooKeeper locations are specified in the configuration file:
-
-``` xml
-<zookeeper>
-    <node>
-        <host>zoo01.yandex.ru</host>
-        <port>2181</port>
-    </node>
-    <node>
-        <host>zoo02.yandex.ru</host>
-        <port>2181</port>
-    </node>
-    <node>
-        <host>zoo03.yandex.ru</host>
-        <port>2181</port>
-    </node>
-</zookeeper>
-```
-
-Also, we need to set macros for identifying each shard and replica that are used on table creation:
-
-``` xml
-<macros>
-    <shard>01</shard>
-    <replica>01</replica>
-</macros>
-```
-
-If there are no replicas at the moment of replicated table creation, a new first replica is instantiated. If there are already live replicas, the new replica clones data from the existing ones. You have the option to create all replicated tables first, and then insert data into them. Another option is to create some replicas and add the others after or during data insertion.
-
-``` sql
-CREATE TABLE tutorial.hits_replica (...)
-ENGINE = ReplicatedMergeTree(
-    '/clickhouse_perftest/tables/{shard}/hits',
-    '{replica}'
-)
-...
-```
-
-Here we use the [ReplicatedMergeTree](../engines/table-engines/mergetree-family/replication.md) table engine. In the parameters, we specify the ZooKeeper path containing the shard and replica identifiers.
-
-``` sql
-INSERT INTO tutorial.hits_replica SELECT * FROM tutorial.hits_local;
-```
-
-Replication operates in multi-master mode. Data can be loaded into any replica, and the system then syncs it with the other instances automatically. Replication is asynchronous, so at a given moment, not all replicas may contain recently inserted data. At least one replica should be up to allow data ingestion. Others will sync up data and repair consistency once they become active again. Note that this approach allows for the low possibility of a loss of recently inserted data.
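-
-Replica health can also be watched from SQL. A minimal sketch, assuming the default system tables and the `hits_replica` table created above (the exact column set may vary between ClickHouse versions):
-
-``` sql
--- one row per replicated table on this server;
--- absolute_delay shows how far this replica lags behind the most up-to-date one
-SELECT database, table, is_leader, total_replicas, active_replicas, absolute_delay
-FROM system.replicas
-WHERE table = 'hits_replica';
-```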
-
-[Original article](https://clickhouse.tech/docs/en/getting_started/tutorial/)
diff --git a/docs/es/guides/apply-catboost-model.md b/docs/es/guides/apply-catboost-model.md
deleted file mode 100644
index b1fe50f3276..00000000000
--- a/docs/es/guides/apply-catboost-model.md
+++ /dev/null
@@ -1,239 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 41
-toc_title: Applying CatBoost Models
----
-
-# Applying a CatBoost Model in ClickHouse {#applying-catboost-model-in-clickhouse}
-
-[CatBoost](https://catboost.ai) is a free and open-source gradient boosting library developed at [Yandex](https://yandex.com/company/) for machine learning.
-
-With this instruction, you will learn to apply pre-trained models in ClickHouse by running model inference from SQL.
-
-To apply a CatBoost model in ClickHouse:
-
-1. [Create a table](#create-table).
-2. [Insert the data into the table](#insert-data-to-table).
-3. [Integrate CatBoost into ClickHouse](#integrate-catboost-into-clickhouse) (optional step).
-4. [Run the model inference from SQL](#run-model-inference).
-
-For more information about training CatBoost models, see [Training and applying models](https://catboost.ai/docs/features/training.html#training).
-
-## Prerequisites {#prerequisites}
-
-If you do not have [Docker](https://docs.docker.com/install/) yet, install it.
-
-!!! note "Note"
-    [Docker](https://www.docker.com) is a software platform that allows you to create containers that isolate a CatBoost and ClickHouse installation from the rest of the system.
-
-Before applying a CatBoost model:
-
-**1.** Pull the [Docker image](https://hub.docker.com/r/yandex/tutorial-catboost-clickhouse) from the registry:
-
-``` bash
-$ docker pull yandex/tutorial-catboost-clickhouse
-```
-
-This Docker image contains everything you need to run CatBoost and ClickHouse: code, runtime, libraries, environment variables, and configuration files.
-
-**2.** Make sure the Docker image has been successfully pulled:
-
-``` bash
-$ docker image ls
-REPOSITORY                            TAG                 IMAGE ID            CREATED             SIZE
-yandex/tutorial-catboost-clickhouse   latest              622e4d17945b        22 hours ago        1.37GB
-```
-
-**3.** Start a Docker container based on this image:
-
-``` bash
-$ docker run -it -p 8888:8888 yandex/tutorial-catboost-clickhouse
-```
-
-## 1. Create a Table {#create-table}
-
-To create a ClickHouse table for the training sample:
-
-**1.** Start the ClickHouse console client in interactive mode:
-
-``` bash
-$ clickhouse client
-```
-
-!!! note "Note"
-    The ClickHouse server is already running inside the Docker container.
-
-**2.** Create the table using the command:
-
-``` sql
-:) CREATE TABLE amazon_train
-(
-    date Date MATERIALIZED today(),
-    ACTION UInt8,
-    RESOURCE UInt32,
-    MGR_ID UInt32,
-    ROLE_ROLLUP_1 UInt32,
-    ROLE_ROLLUP_2 UInt32,
-    ROLE_DEPTNAME UInt32,
-    ROLE_TITLE UInt32,
-    ROLE_FAMILY_DESC UInt32,
-    ROLE_FAMILY UInt32,
-    ROLE_CODE UInt32
-)
-ENGINE = MergeTree ORDER BY date
-```
-
-**3.** Exit the ClickHouse console client:
-
-``` sql
-:) exit
-```
-
-## 2. Insert the Data into the Table {#insert-data-to-table}
-
-To insert the data:
-
-**1.** Run the following command:
-
-``` bash
-$ clickhouse client --host 127.0.0.1 --query 'INSERT INTO amazon_train FORMAT CSVWithNames' < ~/amazon/train.csv
-```
-
-**2.** Start the ClickHouse console client in interactive mode:
-
-``` bash
-$ clickhouse client
-```
-
-**3.** Make sure the data has been uploaded:
-
-``` sql
-:) SELECT count() FROM amazon_train
-
-SELECT count()
-FROM amazon_train
-
-+-count()-+
-|   65538 |
-+---------+
-```
-
-## 3. Integrate CatBoost into ClickHouse {#integrate-catboost-into-clickhouse}
-
-!!! note "Note"
-    **Optional step.** The Docker image contains everything you need to run CatBoost and ClickHouse.
-
-To integrate CatBoost into ClickHouse:
-
-**1.** Build the evaluation library.
-
-The fastest way to evaluate a CatBoost model is to compile the `libcatboostmodel.so` library. For more information about how to build the library, see [CatBoost documentation](https://catboost.ai/docs/concepts/c-plus-plus-api_dynamic-c-pluplus-wrapper.html).
-
-**2.** Create a new directory anywhere and with any name, for example, `data`, and put the created library in it. The Docker image already contains the library `data/libcatboostmodel.so`.
-
-**3.** Create a new directory for the model config anywhere and with any name, for example, `models`.
-
-**4.** Create a model configuration file with any name, for example, `models/amazon_model.xml`.
-
-**5.** Describe the model configuration:
-
-``` xml
-<models>
-    <model>
-        <type>catboost</type>
-        <name>amazon</name>
-        <path>/home/catboost/tutorial/catboost_model.bin</path>
-        <lifetime>0</lifetime>
-    </model>
-</models>
-```
-
-**6.** Add the path to CatBoost and the model configuration to the ClickHouse configuration:
-
-``` xml
-<catboost_dynamic_library_path>/home/catboost/data/libcatboostmodel.so</catboost_dynamic_library_path>
-<models_config>/home/catboost/models/*_model.xml</models_config>
-```
-
-## 4. Run the Model Inference from SQL {#run-model-inference}
-
-For the test model, run the ClickHouse client `$ clickhouse client`.
-
-Let's make sure that the model is working:
-
-``` sql
-:) SELECT
-    modelEvaluate('amazon',
-                RESOURCE,
-                MGR_ID,
-                ROLE_ROLLUP_1,
-                ROLE_ROLLUP_2,
-                ROLE_DEPTNAME,
-                ROLE_TITLE,
-                ROLE_FAMILY_DESC,
-                ROLE_FAMILY,
-                ROLE_CODE) > 0 AS prediction,
-    ACTION AS target
-FROM amazon_train
-LIMIT 10
-```
-
-!!! note "Note"
-    The function [modelEvaluate](../sql-reference/functions/other-functions.md#function-modelevaluate) returns a tuple with raw predictions per class for multiclass models.
-
-Let's predict the probability:
-
-``` sql
-:) SELECT
-    modelEvaluate('amazon',
-                RESOURCE,
-                MGR_ID,
-                ROLE_ROLLUP_1,
-                ROLE_ROLLUP_2,
-                ROLE_DEPTNAME,
-                ROLE_TITLE,
-                ROLE_FAMILY_DESC,
-                ROLE_FAMILY,
-                ROLE_CODE) AS prediction,
-    1. / (1 + exp(-prediction)) AS probability,
-    ACTION AS target
-FROM amazon_train
-LIMIT 10
-```
-
-!!! note "Note"
-    More info about the [exp()](../sql-reference/functions/math-functions.md) function.
-
-Let's calculate LogLoss on the sample:
-
-``` sql
-:) SELECT -avg(tg * log(prob) + (1 - tg) * log(1 - prob)) AS logloss
-FROM
-(
-    SELECT
-        modelEvaluate('amazon',
-                RESOURCE,
-                MGR_ID,
-                ROLE_ROLLUP_1,
-                ROLE_ROLLUP_2,
-                ROLE_DEPTNAME,
-                ROLE_TITLE,
-                ROLE_FAMILY_DESC,
-                ROLE_FAMILY,
-                ROLE_CODE) AS prediction,
-        1. / (1. + exp(-prediction)) AS prob,
-        ACTION AS tg
-    FROM amazon_train
-)
-```
-
-!!! note "Note"
note "Nota" - Más información sobre [avg()](../sql-reference/aggregate-functions/reference.md#agg_function-avg) y [registro()](../sql-reference/functions/math-functions.md) función. - -[Artículo Original](https://clickhouse.tech/docs/en/guides/apply_catboost_model/) diff --git a/docs/es/guides/index.md b/docs/es/guides/index.md deleted file mode 100644 index c8332ac7846..00000000000 --- a/docs/es/guides/index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_folder_title: Guiar -toc_priority: 38 -toc_title: "Descripci\xF3n" ---- - -# Guías de ClickHouse {#clickhouse-guides} - -Lista de instrucciones detalladas paso a paso que ayudan a resolver varias tareas usando ClickHouse: - -- [Tutorial sobre la configuración simple del clúster](../getting-started/tutorial.md) -- [Aplicación de un modelo CatBoost en ClickHouse](apply-catboost-model.md) - -[Artículo Original](https://clickhouse.tech/docs/en/guides/) diff --git a/docs/es/images/column-oriented.gif b/docs/es/images/column-oriented.gif deleted file mode 100644 index d5ac7c82848..00000000000 Binary files a/docs/es/images/column-oriented.gif and /dev/null differ diff --git a/docs/es/images/logo.svg b/docs/es/images/logo.svg deleted file mode 100644 index b5ab923ff65..00000000000 --- a/docs/es/images/logo.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/docs/es/images/row-oriented.gif b/docs/es/images/row-oriented.gif deleted file mode 100644 index 41395b5693e..00000000000 Binary files a/docs/es/images/row-oriented.gif and /dev/null differ diff --git a/docs/es/index.md b/docs/es/index.md deleted file mode 100644 index c76fe32e33b..00000000000 --- a/docs/es/index.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -machine_translated: false -machine_translated_rev: -toc_priority: 0 -toc_title: "Descripción" ---- - -# ¿Qué es ClickHouse? {#what-is-clickhouse} - -ClickHouse es un sistema de gestión de bases de datos (DBMS), orientado a columnas, para el procesamiento analítico de consultas en línea (OLAP). - -En un DBMS “normal”, orientado a filas, los datos se almacenan en este orden: - -| Fila | Argumento | JavaEnable | Titular | GoodEvent | EventTime | -|------|-------------|------------|---------------------------|-----------|---------------------| -| #0 | 89354350662 | 1 | Relaciones con inversores | 1 | 2016-05-18 05:19:20 | -| #1 | 90329509958 | 0 | Contáctenos | 1 | 2016-05-18 08:10:20 | -| #2 | 89953706054 | 1 | Mision | 1 | 2016-05-18 07:38:00 | -| #N | … | … | … | … | … | - -En otras palabras, todos los valores relacionados con una fila se almacenan físicamente uno junto al otro. - -Ejemplos de un DBMS orientado a filas son MySQL, Postgres y MS SQL Server. - -En un DBMS orientado a columnas, los datos se almacenan así: - -| Fila: | #0 | #1 | #2 | #N | -|-------------|---------------------------|---------------------|---------------------|-----| -| Argumento: | 89354350662 | 90329509958 | 89953706054 | … | -| JavaEnable: | 1 | 0 | 1 | … | -| Titular: | Relaciones con inversores | Contáctenos | Mision | … | -| GoodEvent: | 1 | 1 | 1 | … | -| EventTime: | 2016-05-18 05:19:20 | 2016-05-18 08:10:20 | 2016-05-18 07:38:00 | … | - -Estos ejemplos solo muestran el orden en el que se organizan los datos. Los valores de diferentes columnas se almacenan por separado y los datos de la misma columna se almacenan juntos. 
-
-Examples of a column-oriented DBMS: Vertica, Paraccel (Actian Matrix and Amazon Redshift), Sybase IQ, Exasol, Infobright, InfiniDB, MonetDB (VectorWise and Actian Vector), LucidDB, SAP HANA, Google Dremel, Google PowerDrill, Druid, and kdb+.
-
-Different orders for storing data are better suited to different scenarios. The data access scenario refers to what queries are made, how often, and in what proportion; how much data is read for each type of query - rows, columns, and bytes; the relationship between reading and updating data; the working size of the data and how locally it is used; whether transactions are used, and how isolated they are; requirements for data replication and logical integrity; requirements for latency and throughput for each type of query, and so on.
-
-The higher the load on the system, the more important it is to tailor the system setup to match the requirements of the usage scenario, and the more fine-grained this tailoring becomes. There is no system that is equally well suited to significantly different scenarios. If a system is adaptable to a wide set of scenarios, then under a high load it will handle all the scenarios equally poorly, or will work well for just one or a few of the possible scenarios.
-
-## Key Properties of the OLAP Scenario {#key-properties-of-olap-scenario}
-
-- The vast majority of requests are for read access.
-- Data is updated in fairly large batches (> 1000 rows), not by single rows; or it is not updated at all.
-- Data is added to the database but is not modified.
-- For reads, quite a large number of rows is extracted from the database, but only a small subset of columns.
-- Tables are “wide,” meaning they contain a large number of columns.
-- Queries are relatively rare (usually hundreds of queries per server or less per second).
-- For simple queries, latencies around 50 ms are allowed.
-- Column values are fairly small: numbers and short strings (for example, 60 bytes per URL).
-- Requires high throughput when processing a single query (up to billions of rows per second per server).
-- Transactions are not necessary.
-- Low requirements for data consistency.
-- There is one large table per query. All tables are small, except for one.
-- A query result is significantly smaller than the source data. In other words, data is filtered or aggregated, so the result fits in a single server's RAM.
-
-It is easy to see that the OLAP scenario is very different from other popular scenarios (such as OLTP or Key-Value access). So it does not make sense to try to use an OLTP or a Key-Value database for processing analytical queries if you want to get decent performance. For example, if you try to use MongoDB or Redis for analytics, you will get very poor performance compared to OLAP databases.
-
-## Why Column-Oriented Databases Work Better in the OLAP Scenario {#why-column-oriented-databases-work-better-in-the-olap-scenario}
-
-Column-oriented databases are better suited to OLAP scenarios: they are at least 100 times faster in processing most queries.
-The reasons are explained in detail below, but the fact is easier to demonstrate visually:
-
-**Row-oriented DBMS**
-
-![Row-oriented](images/row-oriented.gif#)
-
-**Column-oriented DBMS**
-
-![Column-oriented](images/column-oriented.gif#)
-
-See the difference?
-
-### Input/Output {#inputoutput}
-
-1. For an analytical query, only a small number of table columns need to be read. In a column-oriented database, you can read just the data you need. For example, if you need 5 columns out of 100, you can expect a 20-fold reduction in I/O.
-2. Since data is read in packets, it is easier to compress. Data in columns is also easier to compress. This further reduces the I/O volume.
-3. Due to the reduced I/O, more data fits in the system cache.
-
-For example, the query “count the number of records for each advertising platform” requires reading one “advertising platform ID” column, which takes up 1 byte uncompressed. If most of the traffic was not from advertising platforms, you can expect at least 10-fold compression of this column. When using a quick compression algorithm, data decompression is possible at a speed of at least several gigabytes of uncompressed data per second. In other words, this query can be processed at a speed of approximately several billion rows per second on a single server. This speed is actually achieved in practice (a sketch of such a query is shown at the end of this article).
-
-### CPU {#cpu}
-
-Since executing a query requires processing a large number of rows, it helps to dispatch all operations for entire vectors instead of for separate rows, or to implement the query engine so that there is almost no dispatching cost. If you do not do this, with any half-decent disk subsystem, the query interpreter inevitably stalls the CPU. It makes sense to both store data in columns and process it, when possible, by columns.
-
-There are two ways to do this:
-
-1. A vector engine. All operations are written for vectors, instead of for separate values. This means you do not need to call operations very often, and dispatching costs are negligible. Operation code contains an optimized internal cycle.
-
-2. Code generation. The code generated for the query has all the indirect calls inlined.
-
-This is not done in “normal” databases, because it does not make sense when running simple queries. However, there are exceptions. For example, MemSQL uses code generation to reduce latency when processing SQL queries. (For comparison, analytical DBMSs require optimization of throughput, not latency.)
-
-Note that for CPU efficiency, the query language must be declarative (SQL or MDX), or at least a vector (J, K). The query should only contain implicit loops, allowing for optimization.
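-
-As an illustration of the advertising-platform query mentioned above, here is a minimal sketch; it assumes a hits-style table with an `AdvEngineID` column, such as the one from the example datasets:
-
-``` sql
--- reads a single small column and aggregates it by value:
--- the kind of query a column store can process at billions of rows per second
-SELECT AdvEngineID, count() AS records
-FROM hits
-GROUP BY AdvEngineID
-ORDER BY records DESC;
-```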
-
-{## [Original article](https://clickhouse.tech/docs/en/) ##}
diff --git a/docs/es/interfaces/cli.md b/docs/es/interfaces/cli.md
deleted file mode 100644
index 395f9831a4e..00000000000
--- a/docs/es/interfaces/cli.md
+++ /dev/null
@@ -1,149 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 17
-toc_title: "Command-line client"
----
-
-# Command-line client {#command-line-client}
-
-ClickHouse provides a native command-line client: `clickhouse-client`. The client supports command-line options and configuration files. For more information, see [Configuring](#interfaces_cli_configuration).
-
-[Install](../getting-started/index.md) it from the `clickhouse-client` package and run it with the command `clickhouse-client`.
-
-``` bash
-$ clickhouse-client
-ClickHouse client version 19.17.1.1579 (official build).
-Connecting to localhost:9000 as user default.
-Connected to ClickHouse server version 19.17.1 revision 54428.
-
-:)
-```
-
-Different client and server versions are compatible with one another, but some features may not be available in older clients. We recommend using the same version of the client as the server application. When you try to use a client of an older version than the server, `clickhouse-client` displays the message:
-
-    ClickHouse client version is older than ClickHouse server. It may lack support for new features.
-
-## Usage {#cli_usage}
-
-The client can be used in interactive and non-interactive (batch) mode. To use batch mode, specify the ‘query’ parameter, or send data to ‘stdin’ (it verifies that ‘stdin’ is not a terminal), or both. Similar to the HTTP interface, when using the ‘query’ parameter and sending data to ‘stdin’, the request is a concatenation of the ‘query’ parameter, a line feed, and the data in ‘stdin’. This is convenient for large INSERT queries.
-
-Example of using the client to insert data:
-
-``` bash
-$ echo -ne "1, 'some text', '2016-08-14 00:00:00'\n2, 'some more text', '2016-08-14 00:00:01'" | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV";
-
-$ cat <<_EOF | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV";
-3, 'some text', '2016-08-14 00:00:00'
-4, 'some more text', '2016-08-14 00:00:01'
-_EOF
-
-$ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV";
-```
-
-In batch mode, the default data format is TabSeparated. You can set the format in the FORMAT clause of the query.
-
-By default, you can only process a single query in batch mode. To make multiple queries from a “script,” use the `--multiquery` parameter. This works for all queries except INSERT. Query results are output consecutively without additional separators. Similarly, to process a large number of queries, you can run ‘clickhouse-client’ for each query. Note that it may take tens of milliseconds to launch the ‘clickhouse-client’ program.
-
-In interactive mode, you get a command line where you can enter queries.
-
-If ‘multiline’ is not specified (the default): To run the query, press Enter. The semicolon is not necessary at the end of the query.
To enter a multiline query, enter a backslash `\` before the line feed. After you press Enter, you will be asked to enter the next line of the query.
-
-If multiline is specified: To run a query, end it with a semicolon and press Enter. If the semicolon was omitted at the end of the entered line, you will be asked to enter the next line of the query.
-
-Only a single query is run, so everything after the semicolon is ignored.
-
-You can specify `\G` instead of or after the semicolon. This indicates Vertical format. In this format, each value is printed on a separate line, which is convenient for wide tables. This unusual feature was added for compatibility with the MySQL CLI.
-
-The command line is based on ‘replxx’ (similar to ‘readline’). In other words, it uses the familiar keyboard shortcuts and keeps a history. The history is written to `~/.clickhouse-client-history`.
-
-By default, the format used is PrettyCompact. You can change the format in the FORMAT clause of the query, by specifying `\G` at the end of the query, by using the `--format` or `--vertical` argument in the command line, or by using the client configuration file.
-
-To exit the client, press Ctrl+D, or enter one of the following instead of a query: “exit”, “quit”, “logout”, “exit;”, “quit;”, “logout;”, “q”, “Q”, “:q”
-
-When processing a query, the client shows:
-
-1. Progress, which is updated no more than 10 times per second (by default). For quick queries, the progress might not have time to be displayed.
-2. The formatted query after parsing, for debugging.
-3. The result in the specified format.
-4. The number of lines in the result, the time passed, and the average speed of query processing.
-
-You can cancel a long query by pressing Ctrl+C. However, you will still need to wait a little for the server to abort the request. It is not possible to cancel a query at certain stages. If you don't wait and press Ctrl+C a second time, the client will exit.
-
-The command-line client allows passing external data (external temporary tables) for querying. For more information, see the section “External data for query processing”.
-
-### Queries with Parameters {#cli-queries-with-parameters}
-
-You can create a query with parameters and pass values for them from the client application. This allows you to avoid formatting a query with specific dynamic values on the client side. For example:
-
-``` bash
-$ clickhouse-client --param_parName="[1, 2]" -q "SELECT * FROM table WHERE a = {parName:Array(UInt16)}"
-```
-
-#### Query Syntax {#cli-queries-with-parameters-syntax}
-
-Format a query as usual, then place the values that you want to pass from the app parameters to the query in braces in the following format:
-
-``` sql
-{<name>:<data type>}
-```
-
-- `name` — Placeholder identifier. In the console client it should be used in app parameters as `--param_<name> = value`.
-- `data type` — [Data type](../sql-reference/data-types/index.md) of the app parameter value. For example, a data structure like `(integer, ('string', integer))` can have the `Tuple(UInt8, Tuple(String, UInt8))` data type (you can also use other [integer](../sql-reference/data-types/int-uint.md) types).
-
-#### Example {#example}
-
-``` bash
-$ clickhouse-client --param_tuple_in_tuple="(10, ('dt', 10))" -q "SELECT * FROM table WHERE val = {tuple_in_tuple:Tuple(UInt8, Tuple(String, UInt8))}"
-```
-
-## Configuring {#interfaces_cli_configuration}
-
-You can pass parameters to `clickhouse-client` (all parameters have a default value) using:
-
-- From the command line
-
-    Command-line options override the default values and the settings in configuration files.
-
-- Configuration files.
-
-    Settings in configuration files override the default values.
-
-### Command Line Options {#command-line-options}
-
-- `--host, -h` – The server name, ‘localhost’ by default. You can use either the name or the IPv4 or IPv6 address.
-- `--port` – The port to connect to. Default value: 9000. Note that the HTTP interface and the native interface use different ports.
-- `--user, -u` – The username. Default value: default.
-- `--password` – The password. Default value: empty string.
-- `--query, -q` – The query to process when using non-interactive mode.
-- `--database, -d` – Select the current default database. Default value: the current database from the server settings (‘default’ by default).
-- `--multiline, -m` – If specified, allow multiline queries (do not send the query on Enter).
-- `--multiquery, -n` – If specified, allow processing multiple queries separated by semicolons.
-- `--format, -f` – Use the specified default format to output the result.
-- `--vertical, -E` – If specified, use the Vertical format by default to output the result. This is the same as ‘–format=Vertical’. In this format, each value is printed on a separate line, which is helpful when displaying wide tables.
-- `--time, -t` – If specified, print the query execution time to ‘stderr’ in non-interactive mode.
-- `--stacktrace` – If specified, also print the stack trace if an exception occurs.
-- `--config-file` – The name of the configuration file.
-- `--secure` – If specified, will connect to the server over a secure connection.
-- `--param_<name>` — Value for a [query with parameters](#cli-queries-with-parameters).
-
-### Configuration Files {#configuration_files}
-
-`clickhouse-client` uses the first existing file of the following:
-
-- Defined in the `--config-file` parameter.
-- `./clickhouse-client.xml`
-- `~/.clickhouse-client/config.xml`
-- `/etc/clickhouse-client/config.xml`
-
-Example of a config file:
-
-``` xml
-<config>
-    <user>username</user>
-    <password>password</password>
-    <secure>False</secure>
-</config>
-```
-
-[Original article](https://clickhouse.tech/docs/en/interfaces/cli/)
diff --git a/docs/es/interfaces/cpp.md b/docs/es/interfaces/cpp.md
deleted file mode 100644
index bc5dc3dbc24..00000000000
--- a/docs/es/interfaces/cpp.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 24
-toc_title: C++ Client Library
----
-
-# C++ Client Library {#c-client-library}
-
-See the README in the [clickhouse-cpp](https://github.com/ClickHouse/clickhouse-cpp) repository.
-
-[Original article](https://clickhouse.tech/docs/en/interfaces/cpp/)
diff --git a/docs/es/interfaces/formats.md b/docs/es/interfaces/formats.md
deleted file mode 100644
index 03c1873d306..00000000000
--- a/docs/es/interfaces/formats.md
+++ /dev/null
@@ -1,1212 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 21
-toc_title: Input and output formats
----
-
-# Formats for Input and Output Data {#formats}
-
-ClickHouse can accept and return data in various formats. A format supported for input can be used to parse the data provided to `INSERT`s, to perform `SELECT`s from a file-backed table such as File, URL or HDFS, or to read an external dictionary. A format supported for output can be used to arrange the results of a `SELECT`, and to perform `INSERT`s into a file-backed table.
-
-The supported formats are:
-
-| Format                                                          | Input | Output |
-|-----------------------------------------------------------------|-------|--------|
-| [TabSeparated](#tabseparated)                                   | ✔     | ✔      |
-| [TabSeparatedRaw](#tabseparatedraw)                             | ✗     | ✔      |
-| [TabSeparatedWithNames](#tabseparatedwithnames)                 | ✔     | ✔      |
-| [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔     | ✔      |
-| [Template](#format-template)                                    | ✔     | ✔      |
-| [TemplateIgnoreSpaces](#templateignorespaces)                   | ✔     | ✗      |
-| [CSV](#csv)                                                     | ✔     | ✔      |
-| [CSVWithNames](#csvwithnames)                                   | ✔     | ✔      |
-| [CustomSeparated](#format-customseparated)                      | ✔     | ✔      |
-| [Values](#data-format-values)                                   | ✔     | ✔      |
-| [Vertical](#vertical)                                           | ✗     | ✔      |
-| [VerticalRaw](#verticalraw)                                     | ✗     | ✔      |
-| [JSON](#json)                                                   | ✗     | ✔      |
-| [JSONCompact](#jsoncompact)                                     | ✗     | ✔      |
-| [JSONEachRow](#jsoneachrow)                                     | ✔     | ✔      |
-| [TSKV](#tskv)                                                   | ✔     | ✔      |
-| [Pretty](#pretty)                                               | ✗     | ✔      |
-| [PrettyCompact](#prettycompact)                                 | ✗     | ✔      |
-| [PrettyCompactMonoBlock](#prettycompactmonoblock)               | ✗     | ✔      |
-| [PrettyNoEscapes](#prettynoescapes)                             | ✗     | ✔      |
-| [PrettySpace](#prettyspace)                                     | ✗     | ✔      |
-| [Protobuf](#protobuf)                                           | ✔     | ✔      |
-| [Avro](#data-format-avro)                                       | ✔     | ✔      |
-| [AvroConfluent](#data-format-avro-confluent)                    | ✔     | ✗      |
-| [Parquet](#data-format-parquet)                                 | ✔     | ✔      |
-| [ORC](#data-format-orc)                                         | ✔     | ✗      |
-| [RowBinary](#rowbinary)                                         | ✔     | ✔      |
-| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes)       | ✔     | ✔      |
-| [Native](#native)                                               | ✔     | ✔      |
-| [Null](#null)                                                   | ✗     | ✔      |
-| [XML](#xml)                                                     | ✗     | ✔      |
-| [CapnProto](#capnproto)                                         | ✔     | ✗      |
-
-You can control some format processing parameters with the ClickHouse settings. For more information, read the [Settings](../operations/settings/settings.md) section.
-
-## TabSeparated {#tabseparated}
-
-In TabSeparated format, data is written by row. Each row contains values separated by tabs. Each value is followed by a tab, except the last value in the row, which is followed by a line feed. Strictly Unix line feeds are assumed everywhere. The last row must also contain a line feed at the end. Values are written in text format, without enclosing quotation marks, and with special characters escaped.
-
-This format is also available under the name `TSV`.
-
-The `TabSeparated` format is convenient for processing data using custom programs and scripts. It is used by default in the HTTP interface and in the command-line client's batch mode. This format also allows transferring data between different DBMSs.
For example, you can get a dump from MySQL and upload it to ClickHouse, or vice versa.
-
-The `TabSeparated` format supports outputting total values (when using WITH TOTALS) and extreme values (when ‘extremes’ is set to 1). In these cases, the total values and extremes are output after the main data. The main result, total values, and extremes are separated from each other by an empty line. Example:
-
-``` sql
-SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORDER BY EventDate FORMAT TabSeparated
-```
-
-``` text
-2014-03-17      1406958
-2014-03-18      1383658
-2014-03-19      1405797
-2014-03-20      1353623
-2014-03-21      1245779
-2014-03-22      1031592
-2014-03-23      1046491
-
-1970-01-01      8873898
-
-2014-03-17      1031592
-2014-03-23      1406958
-```
-
-### Data Formatting {#data-formatting}
-
-Integer numbers are written in decimal form. Numbers can contain an extra “+” character at the beginning (ignored when parsing, and not recorded when formatting). Non-negative numbers can't contain the negative sign. When reading, it is allowed to parse an empty string as a zero, or (for signed types) a string consisting of just a minus sign as a zero. Numbers that do not fit into the corresponding data type may be parsed as a different number, without an error message.
-
-Floating-point numbers are written in decimal form. The dot is used as the decimal separator. Exponential entries are supported, as are ‘inf’, ‘+inf’, ‘-inf’, and ‘nan’. An entry of floating-point numbers may begin or end with a decimal point.
-During formatting, accuracy may be lost on floating-point numbers.
-During parsing, it is not strictly required to read the nearest machine-representable number.
-
-Dates are written in YYYY-MM-DD format and parsed in the same format, but with any characters as separators.
-Dates with times are written in the format `YYYY-MM-DD hh:mm:ss` and parsed in the same format, but with any characters as separators.
-This all occurs in the system time zone at the time the client or server starts (depending on which of them formats the data). For dates with times, daylight saving time is not specified. So if a dump has times during daylight saving time, the dump does not unequivocally match the data, and parsing will select one of the two times.
-During a read operation, incorrect dates and dates with times can be parsed with natural overflow or as null dates and times, without an error message.
-
-As an exception, parsing dates with times is also supported in Unix timestamp format, if the value consists of exactly 10 decimal digits. The result is not time zone-dependent. The formats YYYY-MM-DD hh:mm:ss and NNNNNNNNNN are differentiated automatically.
-
-Strings are output with backslash-escaped special characters. The following escape sequences are used for output: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\'`, `\\`. Parsing also supports the sequences `\a`, `\v`, and `\xHH` (hex escape sequences) and any `\c` sequences, where `c` is any character (these sequences are converted to `c`). Thus, reading data supports formats where a line feed can be written as `\n` or `\`, or as a line feed.
For example, the string `Hello world` with a line feed between the words instead of a space can be parsed in any of the following variations:
-
-``` text
-Hello\nworld
-
-Hello\
-world
-```
-
-The second variant is supported because MySQL uses it when writing tab-separated dumps.
-
-The minimum set of characters that you need to escape when passing data in TabSeparated format: tab, line feed (LF) and backslash.
-
-Only a small set of symbols are escaped. You can easily stumble onto a string value that your terminal will ruin in output.
-
-Arrays are written as a list of comma-separated values in square brackets. Number items in the array are formatted as normal. `Date` and `DateTime` values are written in single quotes. Strings are written in single quotes with the same escaping rules as above.
-
-[NULL](../sql-reference/syntax.md) is formatted as `\N`.
-
-Each element of [Nested](../sql-reference/data-types/nested-data-structures/nested.md) structures is represented as an array.
-
-For example:
-
-``` sql
-CREATE TABLE nestedt
-(
-    `id` UInt8,
-    `aux` Nested(
-        a UInt8,
-        b String
-    )
-)
-ENGINE = TinyLog
-```
-
-``` sql
-INSERT INTO nestedt Values ( 1, [1], ['a'])
-```
-
-``` sql
-SELECT * FROM nestedt FORMAT TSV
-```
-
-``` text
-1  [1]    ['a']
-```
-
-## TabSeparatedRaw {#tabseparatedraw}
-
-Differs from the `TabSeparated` format in that rows are written without escaping.
-This format is only appropriate for outputting a query result, but not for parsing (retrieving data to insert into a table).
-
-This format is also available under the name `TSVRaw`.
-
-## TabSeparatedWithNames {#tabseparatedwithnames}
-
-Differs from the `TabSeparated` format in that the column names are written in the first row.
-During parsing, the first row is completely ignored. You can't use column names to determine their position or to check their correctness.
-(Support for parsing the header row may be added in the future.)
-
-This format is also available under the name `TSVWithNames`.
-
-## TabSeparatedWithNamesAndTypes {#tabseparatedwithnamesandtypes}
-
-Differs from the `TabSeparated` format in that the column names are written in the first row, while the column types are in the second row.
-During parsing, the first and second rows are completely ignored.
-
-This format is also available under the name `TSVWithNamesAndTypes`.
-
-## Template {#format-template}
-
-This format allows specifying a custom format string with placeholders for values, with a specified escaping rule.
-
-It uses the settings `format_template_resultset`, `format_template_row`, `format_template_rows_between_delimiter` and some settings of other formats (e.g. `output_format_json_quote_64bit_integers` when using `JSON` escaping, see further)
-
-The setting `format_template_row` specifies the path to a file which contains a format string for rows with the following syntax:
-
-`delimiter_1${column_1:serializeAs_1}delimiter_2${column_2:serializeAs_2} ... delimiter_N`,
-
-where `delimiter_i` is a delimiter between values (the `$` symbol can be escaped as `$$`),
-`column_i` is a name or index of a column whose values are to be selected or inserted (if empty, the column will be skipped),
-`serializeAs_i` is an escaping rule for the column values.
The following escaping rules are supported:
-
-- `CSV`, `JSON`, `XML` (similarly to the formats of the same names)
-- `Escaped` (similarly to `TSV`)
-- `Quoted` (similarly to `Values`)
-- `Raw` (without escaping, similarly to `TSVRaw`)
-- `None` (no escaping rule, see further)
-
-If an escaping rule is omitted, then `None` will be used. `XML` and `Raw` are suitable only for output.
-
-So, for the following format string:
-
-    `Search phrase: ${SearchPhrase:Quoted}, count: ${c:Escaped}, ad price: $$${price:JSON};`
-
-the values of the `SearchPhrase`, `c` and `price` columns, which are escaped as `Quoted`, `Escaped` and `JSON`, will be printed (for select) or expected (for insert) between the `Search phrase:`, `, count:`, `, ad price: $` and `;` delimiters respectively. For example:
-
-`Search phrase: 'bathroom interior design', count: 2166, ad price: $3;`
-
-The `format_template_rows_between_delimiter` setting specifies the delimiter between rows, which is printed (or expected) after every row except the last one (`\n` by default)
-
-The setting `format_template_resultset` specifies the path to a file which contains a format string for the result set. The format string for the result set has the same syntax as the format string for rows, and allows specifying a prefix, a suffix and a way to print some additional information. It contains the following placeholders instead of column names:
-
-- `data` is the rows with data in `format_template_row` format, separated by `format_template_rows_between_delimiter`. This placeholder must be the first placeholder in the format string.
-- `totals` is the row with total values in `format_template_row` format (when using WITH TOTALS)
-- `min` is the row with minimum values in `format_template_row` format (when extremes are set to 1)
-- `max` is the row with maximum values in `format_template_row` format (when extremes are set to 1)
-- `rows` is the total number of output rows
-- `rows_before_limit` is the minimal number of rows there would have been without LIMIT. Output only if the query contains LIMIT. If the query contains GROUP BY, rows_before_limit_at_least is the exact number of rows there would have been without a LIMIT.
-- `time` is the request execution time in seconds
-- `rows_read` is the number of rows that have been read
-- `bytes_read` is the number of bytes (uncompressed) that have been read
-
-The placeholders `data`, `totals`, `min` and `max` must not have an escaping rule specified (or `None` must be specified explicitly). The remaining placeholders may have any escaping rule specified.
-If the `format_template_resultset` setting is an empty string, `${data}` is used as the default value.
-For insert queries, the format allows skipping some columns or fields if a prefix or suffix is set (see example).
-
-Select example:
-
-``` sql
-SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase ORDER BY c DESC LIMIT 5 FORMAT Template SETTINGS
-format_template_resultset = '/some/path/resultset.format', format_template_row = '/some/path/row.format', format_template_rows_between_delimiter = '\n    '
-```
-
-`/some/path/resultset.format`:
-
-``` text
-<!DOCTYPE HTML>
-<html> <head> <title>Search phrases</title> </head>
- <body>
-  <table border="1"> <caption>Search phrases</caption>
-    <tr> <th>Search phrase</th> <th>Count</th> </tr>
-${data}
-  </table>
-  <table border="1"> <caption>Max</caption>
-    ${max}
-  </table>
-  <b>Processed ${rows_read:XML} rows in ${time:XML} sec</b>
- </body>
-</html>
-```
-
-`/some/path/row.format`:
-
-``` text
-<tr> <td>${0:XML}</td> <td>${1:XML}</td> </tr>
-```
-
-Result:
-
-``` html
-<!DOCTYPE HTML>
-<html> <head> <title>Search phrases</title> </head>
- <body>
-  <table border="1"> <caption>Search phrases</caption>
-    <tr> <th>Search phrase</th> <th>Count</th> </tr>
-    <tr> <td></td> <td>8267016</td> </tr>
-    <tr> <td>bathroom interior design</td> <td>2166</td> </tr>
-    <tr> <td>yandex</td> <td>1655</td> </tr>
-    <tr> <td>spring 2014 fashion</td> <td>1549</td> </tr>
-    <tr> <td>freeform photos</td> <td>1480</td> </tr>
-  </table>
-  <table border="1"> <caption>Max</caption>
-    <tr> <td></td> <td>8873898</td> </tr>
-  </table>
-  <b>Processed 3095973 rows in 0.1569913 sec</b>
- </body>
-</html>
-```
-
-Insert example:
-
-``` text
-Some header
-Page views: 5, User id: 4324182021466249494, Useless field: hello, Duration: 146, Sign: -1
-Page views: 6, User id: 4324182021466249494, Useless field: world, Duration: 185, Sign: 1
-Total rows: 2
-```
-
-``` sql
-INSERT INTO UserActivity FORMAT Template SETTINGS
-format_template_resultset = '/some/path/resultset.format', format_template_row = '/some/path/row.format'
-```
-
-`/some/path/resultset.format`:
-
-``` text
-Some header\n${data}\nTotal rows: ${:CSV}\n
-```
-
-`/some/path/row.format`:
-
-``` text
-Page views: ${PageViews:CSV}, User id: ${UserID:CSV}, Useless field: ${:CSV}, Duration: ${Duration:CSV}, Sign: ${Sign:CSV}
-```
-
-`PageViews`, `UserID`, `Duration` and `Sign` inside the placeholders are names of columns in the table. Values after `Useless field` in rows and after `\nTotal rows:` in the suffix will be ignored.
-All delimiters in the input data must be strictly equal to the delimiters in the specified format strings.
-
-## TemplateIgnoreSpaces {#templateignorespaces}
-
-This format is suitable only for input.
-Similar to `Template`, but skips whitespace characters between delimiters and values in the input stream. However, if format strings contain whitespace characters, these characters will be expected in the input stream. It also allows specifying empty placeholders (`${}` or `${:None}`) to split a delimiter into separate parts in order to ignore spaces between them. Such placeholders are used only for skipping whitespace characters.
-It is possible to read `JSON` using this format if the values of columns have the same order in all rows. For example, the following request can be used for inserting data from the output example of the [JSON](#json) format:
-
-``` sql
-INSERT INTO table_name FORMAT TemplateIgnoreSpaces SETTINGS
-format_template_resultset = '/some/path/resultset.format', format_template_row = '/some/path/row.format', format_template_rows_between_delimiter = ','
-```
-
-`/some/path/resultset.format`:
-
-``` text
-{${}"meta"${}:${:JSON},${}"data"${}:${}[${data}]${},${}"totals"${}:${:JSON},${}"extremes"${}:${:JSON},${}"rows"${}:${:JSON},${}"rows_before_limit_at_least"${}:${:JSON}${}}
-```
-
-`/some/path/row.format`:
-
-``` text
-{${}"SearchPhrase"${}:${}${phrase:JSON}${},${}"c"${}:${}${cnt:JSON}${}}
-```
-
-## TSKV {#tskv}
-
-Similar to TabSeparated, but outputs a value in name=value format. Names are escaped the same way as in the TabSeparated format, and the = symbol is also escaped.
-
-``` text
-SearchPhrase=   count()=8267016
-SearchPhrase=bathroom interior design    count()=2166
-SearchPhrase=yandex    count()=1655
-SearchPhrase=2014 spring fashion    count()=1549
-SearchPhrase=freeform photos    count()=1480
-SearchPhrase=angelina jolie    count()=1245
-SearchPhrase=omsk    count()=1112
-SearchPhrase=photos of dog breeds    count()=1091
-SearchPhrase=curtain designs    count()=1064
-SearchPhrase=baku    count()=1000
-```
-
-[NULL](../sql-reference/syntax.md) is formatted as `\N`.
-
-``` sql
-SELECT * FROM t_null FORMAT TSKV
-```
-
-``` text
-x=1    y=\N
-```
-
-When there is a large number of small columns, this format is ineffective, and there is generally no reason to use it. Nevertheless, it is no worse than JSONEachRow in terms of efficiency.
-
-Both data output and parsing are supported in this format.
For parsing, any order is supported for the values of different columns. It is acceptable for some values to be omitted – they are treated as equal to their default values. In this case, zeros and blank rows are used as default values. Complex values that could be specified in the table are not supported as defaults.
-
-Parsing allows the presence of the additional field `tskv` without the equal sign or a value. This field is ignored.
-
-## CSV {#csv}
-
-Comma Separated Values format ([RFC](https://tools.ietf.org/html/rfc4180)).
-
-When formatting, rows are enclosed in double quotes. A double quote inside a string is output as two double quotes in a row. There are no other rules for escaping characters. Date and date-time are enclosed in double quotes. Numbers are output without quotes. Values are separated by a delimiter character, which is `,` by default. The delimiter character is defined in the setting [format_csv_delimiter](../operations/settings/settings.md#settings-format_csv_delimiter). Rows are separated using the Unix line feed (LF). Arrays are serialized in CSV as follows: first, the array is serialized to a string as in TabSeparated format, and then the resulting string is output to CSV in double quotes. Tuples in CSV format are serialized as separate columns (that is, their nesting in the tuple is lost).
-
-``` bash
-$ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FORMAT CSV" < data.csv
-```
-
-\*By default, the delimiter is `,`. See the [format_csv_delimiter](../operations/settings/settings.md#settings-format_csv_delimiter) setting for more information.
-
-When parsing, all values can be parsed either with or without quotes. Both double and single quotes are supported. Rows can also be arranged without quotes. In this case, they are parsed up to the delimiter character or line feed (CR or LF). In violation of the RFC, when parsing rows without quotes, leading and trailing spaces and tabs are ignored. For the line feed, Unix (LF), Windows (CR LF) and Mac OS Classic (CR LF) types are all supported.
-
-Empty unquoted input values are replaced with default values for the respective columns, if
-[input_format_defaults_for_omitted_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields)
-is enabled.
-
-`NULL` is formatted as `\N` or `NULL` or an empty unquoted string (see the settings [input_format_csv_unquoted_null_literal_as_null](../operations/settings/settings.md#settings-input_format_csv_unquoted_null_literal_as_null) and [input_format_defaults_for_omitted_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields)).
-
-The CSV format supports the output of totals and extremes the same way as `TabSeparated`.
-
-## CSVWithNames {#csvwithnames}
-
-Also prints the header row, similar to `TabSeparatedWithNames`.
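-
-As a minimal sketch of the extra header row, the following can be run against any server, since it uses only literal values:
-
-``` bash
-$ clickhouse-client --query="SELECT 1 AS id, 'hello' AS msg FORMAT CSVWithNames"
-"id","msg"
-1,"hello"
-```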
-
-## CustomSeparated {#format-customseparated}
-
-Similar to [Template](#format-template), but it prints or reads all columns, using the escaping rule from the setting `format_custom_escaping_rule` and the delimiters from the settings `format_custom_field_delimiter`, `format_custom_row_before_delimiter`, `format_custom_row_after_delimiter`, `format_custom_row_between_delimiter`, `format_custom_result_before_delimiter` and `format_custom_result_after_delimiter`, not from format strings.
-There is also a `CustomSeparatedIgnoreSpaces` format, which is similar to `TemplateIgnoreSpaces`.
-
-## JSON {#json}
-
-Outputs data in JSON format. Besides data tables, it also outputs column names and types, along with some additional information: the total number of output rows, and the number of rows that could have been output if there weren't a LIMIT. Example:
-
-``` sql
-SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase WITH TOTALS ORDER BY c DESC LIMIT 5 FORMAT JSON
-```
-
-``` json
-{
-        "meta":
-        [
-                {
-                        "name": "SearchPhrase",
-                        "type": "String"
-                },
-                {
-                        "name": "c",
-                        "type": "UInt64"
-                }
-        ],
-
-        "data":
-        [
-                {
-                        "SearchPhrase": "",
-                        "c": "8267016"
-                },
-                {
-                        "SearchPhrase": "bathroom interior design",
-                        "c": "2166"
-                },
-                {
-                        "SearchPhrase": "yandex",
-                        "c": "1655"
-                },
-                {
-                        "SearchPhrase": "spring 2014 fashion",
-                        "c": "1549"
-                },
-                {
-                        "SearchPhrase": "freeform photos",
-                        "c": "1480"
-                }
-        ],
-
-        "totals":
-        {
-                "SearchPhrase": "",
-                "c": "8873898"
-        },
-
-        "extremes":
-        {
-                "min":
-                {
-                        "SearchPhrase": "",
-                        "c": "1480"
-                },
-                "max":
-                {
-                        "SearchPhrase": "",
-                        "c": "8267016"
-                }
-        },
-
-        "rows": 5,
-
-        "rows_before_limit_at_least": 141137
-}
-```
-
-The JSON is compatible with JavaScript. To ensure this, some characters are additionally escaped: the slash `/` is escaped as `\/`; alternative line breaks `U+2028` and `U+2029`, which break some browsers, are escaped as `\uXXXX`. ASCII control characters are escaped: backspace, form feed, line feed, carriage return, and horizontal tab are replaced with `\b`, `\f`, `\n`, `\r`, `\t`, as are the remaining bytes in the 00-1F range using `\uXXXX` sequences. Invalid UTF-8 sequences are changed to the replacement character � so the output text will consist of valid UTF-8 sequences. For compatibility with JavaScript, Int64 and UInt64 integers are enclosed in double quotes by default. To remove the quotes, you can set the configuration parameter [output_format_json_quote_64bit_integers](../operations/settings/settings.md#session_settings-output_format_json_quote_64bit_integers) to 0.
-
-`rows` – The total number of output rows.
-
-`rows_before_limit_at_least` – The minimal number of rows there would have been without LIMIT. Output only if the query contains LIMIT.
-If the query contains GROUP BY, rows_before_limit_at_least is the exact number of rows there would have been without a LIMIT.
-
-`totals` – Total values (when using WITH TOTALS).
-
-`extremes` – Extreme values (when extremes are set to 1).
-
-This format is only appropriate for outputting a query result, but not for parsing (retrieving data to insert into a table).
-
-ClickHouse supports [NULL](../sql-reference/syntax.md), which is displayed as `null` in the JSON output.
-
-See also the [JSONEachRow](#jsoneachrow) format.
-
-## JSONCompact {#jsoncompact}
-
-Differs from JSON only in that data rows are output in arrays, not in objects.
-
-Example:
-
-``` json
-{
-        "meta":
-        [
-                {
-                        "name": "SearchPhrase",
-                        "type": "String"
-                },
-                {
-                        "name": "c",
-                        "type": "UInt64"
-                }
-        ],
-
-        "data":
-        [
-                ["", "8267016"],
-                ["bathroom interior design", "2166"],
-                ["yandex", "1655"],
-                ["fashion trends spring 2014", "1549"],
-                ["freeform photo", "1480"]
-        ],
-
-        "totals": ["","8873898"],
-
-        "extremes":
-        {
-                "min": ["","1480"],
-                "max": ["","8267016"]
-        },
-
-        "rows": 5,
-
-        "rows_before_limit_at_least": 141137
-}
-```
-
-This format is only appropriate for outputting a query result, but not for parsing (retrieving data to insert into a table).
-See also the `JSONEachRow` format.
-
-## JSONEachRow {#jsoneachrow}
-
-When using this format, ClickHouse outputs rows as separated, newline-delimited JSON objects, but the data as a whole is not valid JSON.
-
-``` json
-{"SearchPhrase":"curtain designs","count()":"1064"}
-{"SearchPhrase":"baku","count()":"1000"}
-{"SearchPhrase":"","count()":"8267016"}
-```
-
-When inserting the data, you should provide a separate JSON object for each row.
-
-### Inserting Data {#inserting-data}
-
-``` sql
-INSERT INTO UserActivity FORMAT JSONEachRow {"PageViews":5, "UserID":"4324182021466249494", "Duration":146,"Sign":-1} {"UserID":"4324182021466249494","PageViews":6,"Duration":185,"Sign":1}
-```
-
-ClickHouse allows:
-
-- Any order of key-value pairs in the object.
-- Omitting some values.
-
-ClickHouse ignores spaces between elements and commas after the objects. You can pass all the objects in one line. You don't have to separate them with line breaks.
-
-**Omitted values processing**
-
-ClickHouse substitutes omitted values with the default values for the corresponding [data types](../sql-reference/data-types/index.md).
-
-If `DEFAULT expr` is specified, ClickHouse uses different substitution rules depending on the [input_format_defaults_for_omitted_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields) setting.
-
-Consider the following table:
-
-``` sql
-CREATE TABLE IF NOT EXISTS example_table
-(
-    x UInt32,
-    a DEFAULT x * 2
-) ENGINE = Memory;
-```
-
-- If `input_format_defaults_for_omitted_fields = 0`, then the default value for `x` and `a` equals `0` (as the default value for the `UInt32` data type).
-- If `input_format_defaults_for_omitted_fields = 1`, then the default value for `x` equals `0`, but the default value of `a` equals `x * 2`.
-
-!!! note "Warning"
-    When inserting data with `insert_sample_with_metadata = 1`, ClickHouse consumes more computational resources compared to insertion with `insert_sample_with_metadata = 0`.
-
-### Selecting Data {#selecting-data}
-
-Consider the `UserActivity` table as an example:
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
-│ 4324182021466249494 │         5 │      146 │   -1 │
-│ 4324182021466249494 │         6 │      185 │    1 │
-└─────────────────────┴───────────┴──────────┴──────┘
-```
-
-The query `SELECT * FROM UserActivity FORMAT JSONEachRow` returns:
-
-``` text
-{"UserID":"4324182021466249494","PageViews":5,"Duration":146,"Sign":-1}
-{"UserID":"4324182021466249494","PageViews":6,"Duration":185,"Sign":1}
-```
-
-Unlike the [JSON](#json) format, there is no substitution of invalid UTF-8 sequences. Values are escaped in the same way as for `JSON`.
-
-!!! note "Note"
-    Any set of bytes can be output in the strings.
Use the `JSONEachRow` format if you are sure that the data in the table can be formatted as JSON without losing any information.
-
-### Usage of Nested Structures {#jsoneachrow-nested}
-
-If you have a table with [Nested](../sql-reference/data-types/nested-data-structures/nested.md) data type columns, you can insert JSON data with the same structure. Enable this feature with the [input_format_import_nested_json](../operations/settings/settings.md#settings-input_format_import_nested_json) setting.
-
-For example, consider the following table:
-
-``` sql
-CREATE TABLE json_each_row_nested (n Nested (s String, i Int32) ) ENGINE = Memory
-```
-
-As you can see in the `Nested` data type description, ClickHouse treats each component of the nested structure as a separate column (`n.s` and `n.i` for our table). You can insert data in the following way:
-
-``` sql
-INSERT INTO json_each_row_nested FORMAT JSONEachRow {"n.s": ["abc", "def"], "n.i": [1, 23]}
-```
-
-To insert data as a hierarchical JSON object, set [input_format_import_nested_json=1](../operations/settings/settings.md#settings-input_format_import_nested_json).
-
-``` json
-{
-    "n": {
-        "s": ["abc", "def"],
-        "i": [1, 23]
-    }
-}
-```
-
-Without this setting, ClickHouse throws an exception.
-
-``` sql
-SELECT name, value FROM system.settings WHERE name = 'input_format_import_nested_json'
-```
-
-``` text
-┌─name────────────────────────────┬─value─┐
-│ input_format_import_nested_json │ 0     │
-└─────────────────────────────────┴───────┘
-```
-
-``` sql
-INSERT INTO json_each_row_nested FORMAT JSONEachRow {"n": {"s": ["abc", "def"], "i": [1, 23]}}
-```
-
-``` text
-Code: 117. DB::Exception: Unknown field found while parsing JSONEachRow format: n: (at row 1)
-```
-
-``` sql
-SET input_format_import_nested_json=1
-INSERT INTO json_each_row_nested FORMAT JSONEachRow {"n": {"s": ["abc", "def"], "i": [1, 23]}}
-SELECT * FROM json_each_row_nested
-```
-
-``` text
-┌─n.s───────────┬─n.i────┐
-│ ['abc','def'] │ [1,23] │
-└───────────────┴────────┘
-```
-
-## Native {#native}
-
-The most efficient format. Data is written and read by blocks in binary format. For each block, the number of rows, number of columns, column names and types, and parts of columns in this block are recorded one after another. In other words, this format is “columnar” – it doesn't convert columns to rows. This is the format used in the native interface for interaction between servers, for using the command-line client, and for C++ clients.
-
-You can use this format to quickly generate dumps that can only be read by the ClickHouse DBMS. It doesn't make sense to work with this format yourself.
-
-## Null {#null}
-
-Nothing is output. However, the query is processed, and when using the command-line client, data is transmitted to the client. This is used for tests, including performance testing.
-Obviously, this format is only appropriate for output, not for parsing.
-
-## Pretty {#pretty}
-
-Outputs data as Unicode-art tables, also using ANSI escape sequences to set colours in the terminal.
-A full grid of the table is drawn, and each row occupies two lines in the terminal.
-Each result block is output as a separate table.
This is necessary so that blocks can be output without buffering results (buffering would be necessary in order to pre-calculate the visible width of all the values).
-
-[NULL](../sql-reference/syntax.md) is output as `ᴺᵁᴸᴸ`.
-
-Example (shown for the [PrettyCompact](#prettycompact) format):
-
-``` sql
-SELECT * FROM t_null
-```
-
-``` text
-┌─x─┬────y─┐
-│ 1 │ ᴺᵁᴸᴸ │
-└───┴──────┘
-```
-
-Rows are not escaped in Pretty\* formats. An example is shown for the [PrettyCompact](#prettycompact) format:
-
-``` sql
-SELECT 'String with \'quotes\' and \t character' AS Escaping_test
-```
-
-``` text
-┌─Escaping_test────────────────────────┐
-│ String with 'quotes' and      character │
-└──────────────────────────────────────┘
-```
-
-To avoid dumping too much data to the terminal, only the first 10,000 rows are printed. If the number of rows is greater than or equal to 10,000, the message “Showed first 10 000” is printed.
-This format is only appropriate for outputting a query result, but not for parsing (retrieving data to insert into a table).
-
-The Pretty format supports outputting total values (when using WITH TOTALS) and extremes (when ‘extremes’ is set to 1). In these cases, total values and extreme values are output after the main data, in separate tables. Example (shown for the [PrettyCompact](#prettycompact) format):
-
-``` sql
-SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORDER BY EventDate FORMAT PrettyCompact
-```
-
-``` text
-┌──EventDate─┬───────c─┐
-│ 2014-03-17 │ 1406958 │
-│ 2014-03-18 │ 1383658 │
-│ 2014-03-19 │ 1405797 │
-│ 2014-03-20 │ 1353623 │
-│ 2014-03-21 │ 1245779 │
-│ 2014-03-22 │ 1031592 │
-│ 2014-03-23 │ 1046491 │
-└────────────┴─────────┘
-
-Totals:
-┌──EventDate─┬───────c─┐
-│ 1970-01-01 │ 8873898 │
-└────────────┴─────────┘
-
-Extremes:
-┌──EventDate─┬───────c─┐
-│ 2014-03-17 │ 1031592 │
-│ 2014-03-23 │ 1406958 │
-└────────────┴─────────┘
-```
-
-## PrettyCompact {#prettycompact}
-
-Differs from [Pretty](#pretty) in that the grid is drawn between rows and the result is more compact.
-This format is used by default in the command-line client in interactive mode.
-
-## PrettyCompactMonoBlock {#prettycompactmonoblock}
-
-Differs from [PrettyCompact](#prettycompact) in that up to 10,000 rows are buffered, then output as a single table, not by blocks.
-
-## PrettyNoEscapes {#prettynoescapes}
-
-Differs from Pretty in that ANSI escape sequences aren't used. This is necessary for displaying this format in a browser, as well as for using the ‘watch’ command-line utility.
-
-Example:
-
-``` bash
-$ watch -n1 "clickhouse-client --query='SELECT event, value FROM system.events FORMAT PrettyCompactNoEscapes'"
-```
-
-You can use the HTTP interface for displaying in the browser.
-
-### PrettyCompactNoEscapes {#prettycompactnoescapes}
-
-The same as the previous setting.
-
-### PrettySpaceNoEscapes {#prettyspacenoescapes}
-
-The same as the previous setting.
-
-## PrettySpace {#prettyspace}
-
-Differs from [PrettyCompact](#prettycompact) in that whitespace (space characters) is used instead of the grid.
-
-## RowBinary {#rowbinary}
-
-Formats and parses data by row in binary format. Rows and values are listed consecutively, without separators.
-This format is less efficient than the Native format, since it is row-based.
-
-Integers use fixed-length little-endian representation. For example, UInt64 uses 8 bytes.
-DateTime is represented as UInt32 containing the Unix timestamp as the value.
-Date is represented as a UInt16 object that contains the number of days since 1970-01-01 as the value.
-String is represented as a varint length (unsigned [LEB128](https://en.wikipedia.org/wiki/LEB128)), followed by the bytes of the string.
-FixedString is represented simply as a sequence of bytes.
-
-Array is represented as a varint length (unsigned [LEB128](https://en.wikipedia.org/wiki/LEB128)), followed by the successive elements of the array.
-
-For [NULL](../sql-reference/syntax.md#null-literal) support, an additional byte containing 1 or 0 is added before each [Nullable](../sql-reference/data-types/nullable.md) value. If 1, then the value is `NULL` and this byte is interpreted as a separate value. If 0, the value after the byte is not `NULL`.
-
-## RowBinaryWithNamesAndTypes {#rowbinarywithnamesandtypes}
-
-Similar to [RowBinary](#rowbinary), but with an added header:
-
-- [LEB128](https://en.wikipedia.org/wiki/LEB128)-encoded number of columns (N)
-- N `String`s specifying column names
-- N `String`s specifying column types
-
-## Values {#data-format-values}
-
-Prints every row in brackets. Rows are separated by commas. There is no comma after the last row. The values inside the brackets are also comma-separated. Numbers are output in decimal format without quotes. Arrays are output in square brackets. Strings, dates, and dates with times are output in quotes. Escaping rules and parsing are similar to the [TabSeparated](#tabseparated) format. During formatting, extra spaces aren't inserted, but during parsing, they are allowed and skipped (except for spaces inside array values, which are not allowed). [NULL](../sql-reference/syntax.md) is represented as `NULL`.
-
-The minimum set of characters that you need to escape when passing data in Values format: single quotes and backslashes.
-
-This is the format that is used in `INSERT INTO t VALUES ...`, but you can also use it for formatting query results.
-
-See also: the [input_format_values_interpret_expressions](../operations/settings/settings.md#settings-input_format_values_interpret_expressions) and [input_format_values_deduce_templates_of_expressions](../operations/settings/settings.md#settings-input_format_values_deduce_templates_of_expressions) settings.
-
-## Vertical {#vertical}
-
-Prints each value on a separate line with the column name specified. This format is convenient for printing just one or a few rows if each row consists of a large number of columns.
-
-[NULL](../sql-reference/syntax.md) is output as `ᴺᵁᴸᴸ`.
-
-Example:
-
-``` sql
-SELECT * FROM t_null FORMAT Vertical
-```
-
-``` text
-Row 1:
-──────
-x: 1
-y: ᴺᵁᴸᴸ
-```
-
-Rows are not escaped in Vertical format:
-
-``` sql
-SELECT 'string with \'quotes\' and \t with some special \n characters' AS test FORMAT Vertical
-```
-
-``` text
-Row 1:
-──────
-test: string with 'quotes' and      with some special
- characters
-```
-
-This format is only appropriate for outputting a query result, but not for parsing (retrieving data to insert into a table).
-
-## VerticalRaw {#verticalraw}
-
-Similar to [Vertical](#vertical), but with escaping disabled.
This format is only suitable for outputting query results, not for parsing (receiving data to insert into a table).
-
-## XML {#xml}
-
-XML format is suitable only for output, not for parsing. Example:
-
-``` xml
-<?xml version='1.0' encoding='UTF-8' ?>
-<result>
-        <meta>
-                <columns>
-                        <column>
-                                <name>SearchPhrase</name>
-                                <type>String</type>
-                        </column>
-                        <column>
-                                <name>count()</name>
-                                <type>UInt64</type>
-                        </column>
-                </columns>
-        </meta>
-        <data>
-                <row>
-                        <SearchPhrase></SearchPhrase>
-                        <field>8267016</field>
-                </row>
-                <row>
-                        <SearchPhrase>bathroom interior design</SearchPhrase>
-                        <field>2166</field>
-                </row>
-                <row>
-                        <SearchPhrase>yandex</SearchPhrase>
-                        <field>1655</field>
-                </row>
-                <row>
-                        <SearchPhrase>2014 spring fashion</SearchPhrase>
-                        <field>1549</field>
-                </row>
-                <row>
-                        <SearchPhrase>freeform photos</SearchPhrase>
-                        <field>1480</field>
-                </row>
-                <row>
-                        <SearchPhrase>angelina jolie</SearchPhrase>
-                        <field>1245</field>
-                </row>
-                <row>
-                        <SearchPhrase>omsk</SearchPhrase>
-                        <field>1112</field>
-                </row>
-                <row>
-                        <SearchPhrase>photos of dog breeds</SearchPhrase>
-                        <field>1091</field>
-                </row>
-                <row>
-                        <SearchPhrase>curtain designs</SearchPhrase>
-                        <field>1064</field>
-                </row>
-                <row>
-                        <SearchPhrase>baku</SearchPhrase>
-                        <field>1000</field>
-                </row>
-        </data>
-        <rows>10</rows>
-        <rows_before_limit_at_least>141137</rows_before_limit_at_least>
-</result>
-```
-
-If the column name does not have an acceptable format, just ‘field’ is used as the element name. In general, the XML structure follows the JSON structure.
-Just as for JSON, invalid UTF-8 sequences are changed to the replacement character � so the output text will consist of valid UTF-8 sequences.
-
-In string values, the characters `<` and `&` are escaped as `&lt;` and `&amp;`.
-
-Arrays are output as `<array><elem>Hello</elem><elem>World</elem>...</array>`, and tuples as `<tuple><elem>Hello</elem><elem>World</elem>...</tuple>`.
-
-## CapnProto {#capnproto}
-
-Cap'n Proto is a binary message format similar to Protocol Buffers and Thrift, but not like JSON or MessagePack.
-
-Cap'n Proto messages are strictly typed and not self-describing, meaning they need an external schema description. The schema is applied on the fly and cached for each query.
-
-``` bash
-$ cat capnproto_messages.bin | clickhouse-client --query "INSERT INTO test.hits FORMAT CapnProto SETTINGS format_schema='schema:Message'"
-```
-
-Where `schema.capnp` looks like this:
-
-``` capnp
-struct Message {
-  SearchPhrase @0 :Text;
-  c @1 :Uint64;
-}
-```
-
-Deserialization is effective and usually doesn't increase the system load.
-
-See also [Format Schema](#formatschema).
-
-## Protobuf {#protobuf}
-
-Protobuf is a [Protocol Buffers](https://developers.google.com/protocol-buffers/) format.
-
-This format requires an external format schema. The schema is cached between queries.
-ClickHouse supports both the `proto2` and `proto3` syntaxes. Repeated/optional/required fields are supported.
-
-Usage examples:
-
-``` sql
-SELECT * FROM test.table FORMAT Protobuf SETTINGS format_schema = 'schemafile:MessageType'
-```
-
-``` bash
-cat protobuf_messages.bin | clickhouse-client --query "INSERT INTO test.table FORMAT Protobuf SETTINGS format_schema='schemafile:MessageType'"
-```
-
-where the file `schemafile.proto` looks like this:
-
-``` protobuf
-syntax = "proto3";
-
-message MessageType {
-  string name = 1;
-  string surname = 2;
-  uint32 birthDate = 3;
-  repeated string phoneNumbers = 4;
-};
-```
-
-To find the correspondence between table columns and the fields of the Protocol Buffers message type, ClickHouse compares their names.
-This comparison is case-insensitive, and the characters `_` (underscore) and `.` (dot) are considered equal.
-If the types of a column and of a field of the Protocol Buffers message are different, the necessary conversion is applied.
-
-Nested messages are supported. For example, for the field `z` in the following message type
-
-``` protobuf
-message MessageType {
-  message XType {
-    message YType {
-      int32 z;
-    };
-    repeated YType y;
-  };
-  XType x;
-};
-```
-
-ClickHouse tries to find a column named `x.y.z` (or `x_y_z` or `X.y_Z` and so on).
-Nested messages are suitable for [nested data structures](../sql-reference/data-types/nested-data-structures/nested.md).
-
-Default values defined in a protobuf schema like this
-
-``` protobuf
-syntax = "proto2";
-
-message MessageType {
-  optional int32 result_per_page = 3 [default = 10];
-}
-```
-
-are not applied; the [table defaults](../sql-reference/statements/create.md#create-default-values) are used instead of them.
-
-ClickHouse inputs and outputs protobuf messages in the `length-delimited` format.
-It means that before every message, its length should be written as a [varint](https://developers.google.com/protocol-buffers/docs/encoding#varints).
-See also [how to read/write length-delimited protobuf messages in popular languages](https://cwiki.apache.org/confluence/display/GEODE/Delimiting+Protobuf+Messages).
-
-## Avro {#data-format-avro}
-
-[Apache Avro](http://avro.apache.org/) is a row-oriented data serialization framework developed within Apache's Hadoop project.
-
-The ClickHouse Avro format supports reading and writing [Avro data files](http://avro.apache.org/docs/current/spec.html#Object+Container+Files).
-
-### Data Types Matching {#data_types-matching}
-
-The table below shows the supported data types and how they match ClickHouse [data types](../sql-reference/data-types/index.md) in `INSERT` and `SELECT` queries.
-
-| Avro data type `INSERT`                     | ClickHouse data type                                                                                                      | Avro data type `SELECT`      |
-|---------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|------------------------------|
-| `boolean`, `int`, `long`, `float`, `double` | [Int(8\|16\|32)](../sql-reference/data-types/int-uint.md), [UInt(8\|16\|32)](../sql-reference/data-types/int-uint.md)    | `int`                        |
-| `boolean`, `int`, `long`, `float`, `double` | [Int64](../sql-reference/data-types/int-uint.md), [UInt64](../sql-reference/data-types/int-uint.md)                      | `long`                       |
-| `boolean`, `int`, `long`, `float`, `double` | [Float32](../sql-reference/data-types/float.md)                                                                           | `float`                      |
-| `boolean`, `int`, `long`, `float`, `double` | [Float64](../sql-reference/data-types/float.md)                                                                           | `double`                     |
-| `bytes`, `string`, `fixed`, `enum`          | [String](../sql-reference/data-types/string.md)                                                                           | `bytes`                      |
-| `bytes`, `string`, `fixed`                  | [FixedString(N)](../sql-reference/data-types/fixedstring.md)                                                              | `fixed(N)`                   |
-| `enum`                                      | [Enum(8\|16)](../sql-reference/data-types/enum.md)                                                                        | `enum`                       |
-| `array(T)`                                  | [Array(T)](../sql-reference/data-types/array.md)                                                                          | `array(T)`                   |
-| `union(null, T)`, `union(T, null)`          | [Nullable(T)](../sql-reference/data-types/date.md)                                                                        | `union(null, T)`             |
-| `null`                                      | [Nullable(Nothing)](../sql-reference/data-types/special-data-types/nothing.md)                                            | `null`                       |
-| `int (date)` \*                             | [Date](../sql-reference/data-types/date.md)                                                                               | `int (date)` \*              |
-| `long (timestamp-millis)` \*                | [DateTime64 (3)](../sql-reference/data-types/datetime.md)                                                                 | `long (timestamp-millis)` \* |
-| `long (timestamp-micros)` \*                | [DateTime64 (6)](../sql-reference/data-types/datetime.md)                                                                 | `long (timestamp-micros)` \* |
-
-\* [Avro logical types](http://avro.apache.org/docs/current/spec.html#Logical+Types)
-
-Unsupported Avro data types: `record` (non-root), `map`
-
-Unsupported Avro logical data types: `uuid`, `time-millis`, `time-micros`, `duration`
-
-### Inserting Data {#inserting-data-1}
-
-To insert data from an Avro file into a ClickHouse table:
-
-``` bash
-$ cat file.avro | clickhouse-client --query="INSERT INTO {some_table} FORMAT Avro"
-```
-
-The root schema of the input Avro file must be of `record` type.
-
-To find the correspondence between table columns and the fields of the Avro schema, ClickHouse compares their names. This comparison is case-sensitive.
-Unused fields are skipped.
-
-Data types of ClickHouse table columns can differ from the corresponding fields of the Avro data inserted. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) the data to the corresponding column type.
-
-### Selecting Data {#selecting-data-1}
-
-To select data from a ClickHouse table into an Avro file:
-
-``` bash
-$ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Avro" > file.avro
-```
-
-Column names must:
-
-- start with `[A-Za-z_]`
-- subsequently contain only `[A-Za-z0-9_]`
-
-Output Avro file compression and the sync interval can be configured with [output_format_avro_codec](../operations/settings/settings.md#settings-output_format_avro_codec) and [output_format_avro_sync_interval](../operations/settings/settings.md#settings-output_format_avro_sync_interval) respectively.
-
-## AvroConfluent {#data-format-avro-confluent}
-
-AvroConfluent supports decoding single-object Avro messages commonly used with [Kafka](https://kafka.apache.org/) and the [Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/index.html).
-
-Each Avro message embeds a schema id that can be resolved to the actual schema with the help of the Schema Registry.
-
-Schemas are cached once resolved.
-
-The Schema Registry URL is configured with [format_avro_schema_registry_url](../operations/settings/settings.md#settings-format_avro_schema_registry_url)
-
-### Data Types Matching {#data_types-matching-1}
-
-Same as [Avro](#data-format-avro)
-
-### Usage {#usage}
-
-To quickly verify schema resolution, you can use [kafkacat](https://github.com/edenhill/kafkacat) with [clickhouse-local](../operations/utilities/clickhouse-local.md#clickhouse-local):
-
-``` bash
-$ kafkacat -b kafka-broker -C -t topic1 -o beginning -f '%s' -c 3 | clickhouse-local --input-format AvroConfluent --format_avro_schema_registry_url 'http://schema-registry' -S "field1 Int64, field2 String" -q 'select * from table'
-1 a
-2 b
-3 c
-```
-
-To use `AvroConfluent` with [Kafka](../engines/table-engines/integrations/kafka.md):
-
-``` sql
-CREATE TABLE topic1_stream
-(
-    field1 String,
-    field2 String
-)
-ENGINE = Kafka()
-SETTINGS
-kafka_broker_list = 'kafka-broker',
-kafka_topic_list = 'topic1',
-kafka_group_name = 'group1',
-kafka_format = 'AvroConfluent';
-
-SET format_avro_schema_registry_url = 'http://schema-registry';
-
-SELECT * FROM topic1_stream;
-```
-
-!!! note "Warning"
-    The setting `format_avro_schema_registry_url` needs to be configured in `users.xml` to keep its value after a restart.
-
-## Parquet {#data-format-parquet}
-
-[Apache Parquet](http://parquet.apache.org/) is a columnar storage format widespread in the Hadoop ecosystem.
-
-## Parquet {#data-format-parquet}
-
-[Apache Parquet](http://parquet.apache.org/) is a columnar storage format widespread in the Hadoop ecosystem. ClickHouse supports read and write operations for this format.
-
-### Data Types Matching {#data_types-matching-2}
-
-The table below shows supported data types and how they match ClickHouse [data types](../sql-reference/data-types/index.md) in `INSERT` and `SELECT` queries.
-
-| Parquet data type (`INSERT`)        | ClickHouse data type                                       | Parquet data type (`SELECT`)        |
-|-------------------------------------|------------------------------------------------------------|-------------------------------------|
-| `UINT8`, `BOOL`                     | [UInt8](../sql-reference/data-types/int-uint.md)           | `UINT8`                             |
-| `INT8`                              | [Int8](../sql-reference/data-types/int-uint.md)            | `INT8`                              |
-| `UINT16`                            | [UInt16](../sql-reference/data-types/int-uint.md)          | `UINT16`                            |
-| `INT16`                             | [Int16](../sql-reference/data-types/int-uint.md)           | `INT16`                             |
-| `UINT32`                            | [UInt32](../sql-reference/data-types/int-uint.md)          | `UINT32`                            |
-| `INT32`                             | [Int32](../sql-reference/data-types/int-uint.md)           | `INT32`                             |
-| `UINT64`                            | [UInt64](../sql-reference/data-types/int-uint.md)          | `UINT64`                            |
-| `INT64`                             | [Int64](../sql-reference/data-types/int-uint.md)           | `INT64`                             |
-| `FLOAT`, `HALF_FLOAT`               | [Float32](../sql-reference/data-types/float.md)            | `FLOAT`                             |
-| `DOUBLE`                            | [Float64](../sql-reference/data-types/float.md)            | `DOUBLE`                            |
-| `DATE32`                            | [Date](../sql-reference/data-types/date.md)                | `UINT16`                            |
-| `DATE64`, `TIMESTAMP`               | [DateTime](../sql-reference/data-types/datetime.md)        | `UINT32`                            |
-| `STRING`, `BINARY`                  | [String](../sql-reference/data-types/string.md)            | `STRING`                            |
-| —                                   | [FixedString](../sql-reference/data-types/fixedstring.md)  | `STRING`                            |
-| `DECIMAL`                           | [Decimal](../sql-reference/data-types/decimal.md)          | `DECIMAL`                           |
-
-ClickHouse supports configurable precision of the `Decimal` type. The `INSERT` query treats the Parquet `DECIMAL` type as the ClickHouse `Decimal128` type.
-
-Unsupported Parquet data types: `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
-
-Data types of ClickHouse table columns can differ from the corresponding fields of the inserted Parquet data. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) the data to the data type set for the ClickHouse table column.
-
-### Inserting and Selecting Data {#inserting-and-selecting-data}
-
-You can insert Parquet data from a file into a ClickHouse table with the following command:
-
-``` bash
-$ cat {filename} | clickhouse-client --query="INSERT INTO {some_table} FORMAT Parquet"
-```
-
-You can select data from a ClickHouse table and save it into some file in the Parquet format with the following command:
-
-``` bash
-$ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Parquet" > {some_file.pq}
-```
-
-To exchange data with Hadoop, you can use the [HDFS table engine](../engines/table-engines/integrations/hdfs.md).
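-
-As a quick sketch (the file name and column structure here are only an example), `clickhouse-local` can also produce Parquet without a running server, reusing the flags shown in the AvroConfluent example above:
-
-``` bash
-$ cat data.csv | clickhouse-local --input-format CSV -S "id UInt32, name String" -q "SELECT * FROM table FORMAT Parquet" > data.parquet
-```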
-
-## ORC {#data-format-orc}
-
-[Apache ORC](https://orc.apache.org/) is a columnar storage format widespread in the Hadoop ecosystem. You can only insert data in this format to ClickHouse.
-
-### Data Types Matching {#data_types-matching-3}
-
-The table below shows supported data types and how they match ClickHouse [data types](../sql-reference/data-types/index.md) in `INSERT` queries.
-
-| ORC data type (`INSERT`)     | ClickHouse data type                                 |
-|------------------------------|------------------------------------------------------|
-| `UINT8`, `BOOL`              | [UInt8](../sql-reference/data-types/int-uint.md)     |
-| `INT8`                       | [Int8](../sql-reference/data-types/int-uint.md)      |
-| `UINT16`                     | [UInt16](../sql-reference/data-types/int-uint.md)    |
-| `INT16`                      | [Int16](../sql-reference/data-types/int-uint.md)     |
-| `UINT32`                     | [UInt32](../sql-reference/data-types/int-uint.md)    |
-| `INT32`                      | [Int32](../sql-reference/data-types/int-uint.md)     |
-| `UINT64`                     | [UInt64](../sql-reference/data-types/int-uint.md)    |
-| `INT64`                      | [Int64](../sql-reference/data-types/int-uint.md)     |
-| `FLOAT`, `HALF_FLOAT`        | [Float32](../sql-reference/data-types/float.md)      |
-| `DOUBLE`                     | [Float64](../sql-reference/data-types/float.md)      |
-| `DATE32`                     | [Date](../sql-reference/data-types/date.md)          |
-| `DATE64`, `TIMESTAMP`        | [DateTime](../sql-reference/data-types/datetime.md)  |
-| `STRING`, `BINARY`           | [String](../sql-reference/data-types/string.md)      |
-| `DECIMAL`                    | [Decimal](../sql-reference/data-types/decimal.md)    |
-
-ClickHouse supports configurable precision of the `Decimal` type. The `INSERT` query treats the ORC `DECIMAL` type as the ClickHouse `Decimal128` type.
-
-Unsupported ORC data types: `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
-
-Data types of ClickHouse table columns don't have to match the corresponding ORC data fields. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) the data to the data type set for the ClickHouse table column.
-
-### Inserting Data {#inserting-data-2}
-
-You can insert ORC data from a file into a ClickHouse table with the following command:
-
-``` bash
-$ cat filename.orc | clickhouse-client --query="INSERT INTO some_table FORMAT ORC"
-```
-
-To exchange data with Hadoop, you can use the [HDFS table engine](../engines/table-engines/integrations/hdfs.md).
-
-## Format Schema {#formatschema}
-
-The file name containing the format schema is set by the setting `format_schema`.
-It's required to set this setting when one of the formats `Cap'n Proto` or `Protobuf` is used.
-The format schema is a combination of a file name and the name of a message type in this file, delimited by a colon,
-e.g. `schemafile.proto:MessageType`.
-If the file has the standard extension for the format (for example, `.proto` for `Protobuf`),
-it can be omitted, and in this case the format schema looks like `schemafile:MessageType`.
-
-If you input or output data via the [client](../interfaces/cli.md) in [interactive mode](../interfaces/cli.md#cli_usage), the file name specified in the format schema
-can contain an absolute path or a path relative to the current directory on the client.
-If you use the client in [batch mode](../interfaces/cli.md#cli_usage), the path to the schema must be relative for security reasons.
-
-If you input or output data via the [HTTP interface](../interfaces/http.md), the file name specified in the format schema
-must be located in the directory specified in [format_schema_path](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-format_schema_path)
-in the server configuration.
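-
-For example, inserting length-delimited Protobuf data could look roughly like this (a sketch; the file, schema, and table names are placeholders, and `format_schema` is passed as a client option):
-
-``` bash
-$ cat data.bin | clickhouse-client --format_schema='schemafile:MessageType' --query="INSERT INTO some_table FORMAT Protobuf"
-```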
-
-## Skipping Errors {#skippingerrors}
-
-Some formats such as `CSV`, `TabSeparated`, `TSKV`, `JSONEachRow`, `Template`, `CustomSeparated` and `Protobuf` can skip a broken row if a parsing error occurred and continue parsing from the beginning of the next row. See the [input_format_allow_errors_num](../operations/settings/settings.md#settings-input_format_allow_errors_num) and
-[input_format_allow_errors_ratio](../operations/settings/settings.md#settings-input_format_allow_errors_ratio) settings.
-Limitations:
-- In case of a parsing error `JSONEachRow` skips all data until the new line (or EOF), so rows must be delimited by `\n` to count errors correctly.
-- `Template` and `CustomSeparated` use the delimiter after the last column and the delimiter between rows to find the beginning of the next row, so skipping errors works only if at least one of them is not empty.
-
-[Original article](https://clickhouse.tech/docs/en/interfaces/formats/)
diff --git a/docs/es/interfaces/http.md b/docs/es/interfaces/http.md
deleted file mode 100644
index ab510a268e3..00000000000
--- a/docs/es/interfaces/http.md
+++ /dev/null
@@ -1,617 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 19
-toc_title: Interfaz HTTP
----
-
-# HTTP Interface {#http-interface}
-
-The HTTP interface lets you use ClickHouse on any platform from any programming language. We use it for working from Java and Perl, as well as shell scripts. In other departments, the HTTP interface is used from Perl, Python, and Go. The HTTP interface is more limited than the native interface, but it has better compatibility.
-
-By default, clickhouse-server listens for HTTP on port 8123 (this can be changed in the config).
-
-If you make a GET / request without parameters, it returns the 200 response code and the string defined in the [http_server_default_response](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-http_server_default_response) default value "Ok." (with a line feed at the end)
-
-``` bash
-$ curl 'http://localhost:8123/'
-Ok.
-```
-
-Use the GET /ping request in health-check scripts. This handler always returns "Ok." (with a line feed at the end). Available from version 18.12.13.
-
-``` bash
-$ curl 'http://localhost:8123/ping'
-Ok.
-```
-
-Send the request as a URL 'query' parameter, or as a POST. Or send the beginning of the query in the 'query' parameter, and the rest in the POST (we'll explain later why this is necessary). The size of the URL is limited to 16 KB, so keep this in mind when sending large queries.
-
-If successful, you receive the 200 response code and the result in the response body.
-If an error occurs, you receive the 500 response code and an error description text in the response body.
-
-When using the GET method, 'readonly' is set. In other words, for queries that modify data, you can only use the POST method. You can send the query itself either in the POST body or in the URL parameter.
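-
-As a sketch of the readonly behavior (the exact error text may vary by version), a data-modifying query sent via GET is rejected with something like:
-
-``` bash
-$ curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20VALUES%20(1)'
-Code: 164. DB::Exception: Cannot execute query in readonly mode
-```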
-
-Examples:
-
-``` bash
-$ curl 'http://localhost:8123/?query=SELECT%201'
-1
-
-$ wget -nv -O- 'http://localhost:8123/?query=SELECT 1'
-1
-
-$ echo -ne 'GET /?query=SELECT%201 HTTP/1.0\r\n\r\n' | nc localhost 8123
-HTTP/1.0 200 OK
-Date: Wed, 27 Nov 2019 10:30:18 GMT
-Connection: Close
-Content-Type: text/tab-separated-values; charset=UTF-8
-X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
-X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
-X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
-
-1
-```
-
-As you can see, curl is somewhat inconvenient in that spaces must be URL escaped.
-Although wget escapes everything itself, we don't recommend using it because it doesn't work well over HTTP 1.1 when using keep-alive and Transfer-Encoding: chunked.
-
-``` bash
-$ echo 'SELECT 1' | curl 'http://localhost:8123/' --data-binary @-
-1
-
-$ echo 'SELECT 1' | curl 'http://localhost:8123/?query=' --data-binary @-
-1
-
-$ echo '1' | curl 'http://localhost:8123/?query=SELECT' --data-binary @-
-1
-```
-
-If part of the query is sent in the parameter, and part in the POST, a line feed is inserted between these two data parts.
-Example (this won't work):
-
-``` bash
-$ echo 'ECT 1' | curl 'http://localhost:8123/?query=SEL' --data-binary @-
-Code: 59, e.displayText() = DB::Exception: Syntax error: failed at position 0: SEL
-ECT 1
-, expected One of: SHOW TABLES, SHOW DATABASES, SELECT, INSERT, CREATE, ATTACH, RENAME, DROP, DETACH, USE, SET, OPTIMIZE., e.what() = DB::Exception
-```
-
-By default, data is returned in the TabSeparated format (for more information, see the "Formats" section).
-Use the FORMAT clause of the query to request any other format.
-
-``` bash
-$ echo 'SELECT 1 FORMAT Pretty' | curl 'http://localhost:8123/?' --data-binary @-
-┏━━━┓
-┃ 1 ┃
-┡━━━┩
-│ 1 │
-└───┘
-```
-
-The POST method of transmitting data is necessary for INSERT queries. In this case, you can write the beginning of the query in the URL parameter and use POST to pass the data to insert. The data to insert could be, for example, a tab-separated dump from MySQL. In this way, the INSERT query replaces LOAD DATA LOCAL INFILE from MySQL.
-
-Examples: Creating a table:
-
-``` bash
-$ echo 'CREATE TABLE t (a UInt8) ENGINE = Memory' | curl 'http://localhost:8123/' --data-binary @-
-```
-
-Using the familiar INSERT query for data insertion:
-
-``` bash
-$ echo 'INSERT INTO t VALUES (1),(2),(3)' | curl 'http://localhost:8123/' --data-binary @-
-```
-
-Data can be sent separately from the query:
-
-``` bash
-$ echo '(4),(5),(6)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20VALUES' --data-binary @-
-```
-
-You can specify any data format. The 'Values' format is the same one that is used when writing INSERT INTO t VALUES:
-
-``` bash
-$ echo '(7),(8),(9)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20FORMAT%20Values' --data-binary @-
-```
-
-To insert data from a tab-separated dump, specify the corresponding format:
-
-``` bash
-$ echo -ne '10\n11\n12\n' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20FORMAT%20TabSeparated' --data-binary @-
-```
-
-Reading the table contents.
-Data is output in random order due to parallel query processing:
-
-``` bash
-$ curl 'http://localhost:8123/?query=SELECT%20a%20FROM%20t'
-7
-8
-9
-10
-11
-12
-1
-2
-3
-4
-5
-6
-```
-
-Deleting the table.
-
-``` bash
-$ echo 'DROP TABLE t' | curl 'http://localhost:8123/' --data-binary @-
-```
-
-For successful requests that don't return a data table, an empty response body is returned.
-
-You can use the internal ClickHouse compression format when transmitting data. The compressed data has a non-standard format, and you will need the `clickhouse-compressor` program to work with it (it is installed with the `clickhouse-client` package). To increase the efficiency of data insertion, you can disable server-side checksum verification with the [http_native_compression_disable_checksumming_on_decompress](../operations/settings/settings.md#settings-http_native_compression_disable_checksumming_on_decompress) setting.
-
-If you specified `compress=1` in the URL, the server compresses the data it sends to you.
-If you specified `decompress=1` in the URL, the server decompresses the data which you pass in the `POST` method.
-
-You can also choose to use [HTTP compression](https://en.wikipedia.org/wiki/HTTP_compression). To send a compressed `POST` request, append the request header `Content-Encoding: compression_method`. In order for ClickHouse to compress the response, you must append `Accept-Encoding: compression_method`. ClickHouse supports the `gzip`, `br`, and `deflate` [compression methods](https://en.wikipedia.org/wiki/HTTP_compression#Content-Encoding_tokens). To enable HTTP compression, you must use the ClickHouse [enable_http_compression](../operations/settings/settings.md#settings-enable_http_compression) setting. You can configure the data compression level with the [http_zlib_compression_level](#settings-http_zlib_compression_level) setting for all compression methods.
-
-You can use this to reduce network traffic when transmitting a large amount of data, or for creating dumps that are immediately compressed.
-
-Examples of sending data with compression:
-
-``` bash
-#Sending data to the server:
-$ curl -vsS "http://localhost:8123/?enable_http_compression=1" -d 'SELECT number FROM system.numbers LIMIT 10' -H 'Accept-Encoding: gzip'
-
-#Sending data to the client:
-$ echo "SELECT 1" | gzip -c | curl -sS --data-binary @- -H 'Content-Encoding: gzip' 'http://localhost:8123/'
-```
-
-!!! note "Note"
-    Some HTTP clients might decompress data from the server by default (with `gzip` and `deflate`), and you might get decompressed data even if you use the compression settings correctly.
-
-You can use the 'database' URL parameter to specify the default database.
-
-``` bash
-$ echo 'SELECT number FROM numbers LIMIT 10' | curl 'http://localhost:8123/?database=system' --data-binary @-
-0
-1
-2
-3
-4
-5
-6
-7
-8
-9
-```
-
-By default, the database that is registered in the server settings is used as the default database. By default, this is the database called 'default'. Alternatively, you can always specify the database using a dot before the table name.
-
-The username and password can be indicated in one of three ways:
-
-1.  Using HTTP Basic Authentication. Example:
-
-``` bash
-$ echo 'SELECT 1' | curl 'http://user:password@localhost:8123/' -d @-
-```
-
-2.  In the 'user' and 'password' URL parameters. Example:
-
-``` bash
-$ echo 'SELECT 1' | curl 'http://localhost:8123/?user=user&password=password' -d @-
-```
-
-3.  Using the 'X-ClickHouse-User' and 'X-ClickHouse-Key' headers. Example:
-
-``` bash
-$ echo 'SELECT 1' | curl -H 'X-ClickHouse-User: user' -H 'X-ClickHouse-Key: password' 'http://localhost:8123/' -d @-
-```
-
-If the user name is not specified, the `default` name is used. If the password is not specified, the empty password is used.
-You can also use the URL parameters to specify any settings for processing a single query or entire profiles of settings. Example: http://localhost:8123/?profile=web&max_rows_to_read=1000000000&query=SELECT+1
-
-For more information, see the [Settings](../operations/settings/index.md) section.
-
-``` bash
-$ echo 'SELECT number FROM system.numbers LIMIT 10' | curl 'http://localhost:8123/?' --data-binary @-
-0
-1
-2
-3
-4
-5
-6
-7
-8
-9
-```
-
-For information about other parameters, see the section "SET".
-
-Similarly, you can use ClickHouse sessions in the HTTP protocol. To do this, you need to add the `session_id` GET parameter to the request. You can use any string as the session ID. By default, the session is terminated after 60 seconds of inactivity. To change this timeout, modify the `default_session_timeout` setting in the server configuration, or add the `session_timeout` GET parameter to the request. To check the session status, use the `session_check=1` parameter. Only one query at a time can be executed within a single session (see the sketch at the end of this section).
-
-You can receive information about the progress of a query in the `X-ClickHouse-Progress` response headers. To do this, enable [send_progress_in_http_headers](../operations/settings/settings.md#settings-send_progress_in_http_headers). Example of the header sequence:
-
-``` text
-X-ClickHouse-Progress: {"read_rows":"2752512","read_bytes":"240570816","total_rows_to_read":"8880128"}
-X-ClickHouse-Progress: {"read_rows":"5439488","read_bytes":"482285394","total_rows_to_read":"8880128"}
-X-ClickHouse-Progress: {"read_rows":"8783786","read_bytes":"819092887","total_rows_to_read":"8880128"}
-```
-
-Possible header fields:
-
-- `read_rows` — Number of rows read.
-- `read_bytes` — Volume of data read in bytes.
-- `total_rows_to_read` — Total number of rows to be read.
-- `written_rows` — Number of rows written.
-- `written_bytes` — Volume of data written in bytes.
-
-Running requests don't stop automatically if the HTTP connection is lost. Parsing and formatting of data are performed on the server side, and using the network might be ineffective.
-The optional 'query_id' parameter can be passed as the query ID (any string). For more information, see the section "Settings, replace_running_query".
-
-The optional 'quota_key' parameter can be passed as the quota key (any string). For more information, see the section "Quotas".
-
-The HTTP interface allows passing external data (external temporary tables) for querying. For more information, see the section "External data for query processing".
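-
-As a sketch of session usage (the session ID here is an arbitrary string), a setting applied in one request stays in effect for later requests with the same `session_id`:
-
-``` bash
-$ echo 'SET max_rows_to_read = 100' | curl 'http://localhost:8123/?session_id=abc123' --data-binary @-
-$ echo 'SELECT 1' | curl 'http://localhost:8123/?session_id=abc123&session_check=1' --data-binary @-
-1
-```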
-
-## Response Buffering {#response-buffering}
-
-You can enable response buffering on the server side. The `buffer_size` and `wait_end_of_query` URL parameters are provided for this purpose.
-
-`buffer_size` determines the number of bytes in the result to buffer in the server memory. If a result body is larger than this threshold, the buffer is written to the HTTP channel, and the remaining data is sent directly to the HTTP channel.
-
-To ensure that the entire response is buffered, set `wait_end_of_query=1`. In this case, the data that is not stored in memory will be buffered in a temporary server file.
-
-Example:
-
-``` bash
-$ curl -sS 'http://localhost:8123/?max_result_bytes=4000000&buffer_size=3000000&wait_end_of_query=1' -d 'SELECT toUInt8(number) FROM system.numbers LIMIT 9000000 FORMAT RowBinary'
-```
-
-Use buffering to avoid situations where a query processing error occurred after the response code and HTTP headers were sent to the client. In this situation, an error message is written at the end of the response body, and on the client side the error can only be detected at the parsing stage.
-
-### Queries with Parameters {#cli-queries-with-parameters}
-
-You can create a query with parameters and pass values for them from the corresponding HTTP request parameters. For more information, see [Queries with Parameters for CLI](cli.md#cli-queries-with-parameters).
-
-### Example {#example}
-
-``` bash
-$ curl -sS "http://localhost:8123/?param_id=2&param_phrase=test" -d "SELECT * FROM table WHERE int_column = {id:UInt8} and string_column = {phrase:String}"
-```
-
-## Predefined HTTP Interface {#predefined_http_interface}
-
-ClickHouse supports specific queries through the HTTP interface. For example, you can write data to a table as follows:
-
-``` bash
-$ echo '(4),(5),(6)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20VALUES' --data-binary @-
-```
-
-ClickHouse also supports a Predefined HTTP Interface which can help you integrate more easily with third-party tools like the [Prometheus exporter](https://github.com/percona-lab/clickhouse_exporter).
-
-Example:
-
-- First of all, add this section to the server configuration file:
-
-``` xml
-<http_handlers>
-    <rule>
-        <url>/predefined_query</url>
-        <method>POST,GET</method>
-        <handler>
-            <type>predefined_query_handler</type>
-            <query>SELECT * FROM system.metrics LIMIT 5 FORMAT Template SETTINGS format_template_resultset = 'prometheus_template_output_format_resultset', format_template_row = 'prometheus_template_output_format_row', format_template_rows_between_delimiter = '\n'</query>
-        </handler>
-    </rule>
-    <rule>...</rule>
-    <rule>...</rule>
-</http_handlers>
-```
-
-- You can now request the URL directly for data in the Prometheus format:
-
-``` bash
-$ curl -v 'http://localhost:8123/predefined_query'
-* Trying ::1...
-* Connected to localhost (::1) port 8123 (#0)
-> GET /predefined_query HTTP/1.1
-> Host: localhost:8123
-> User-Agent: curl/7.47.0
-> Accept: */*
->
-< HTTP/1.1 200 OK
-< Date: Tue, 28 Apr 2020 08:52:56 GMT
-< Connection: Keep-Alive
-< Content-Type: text/plain; charset=UTF-8
-< X-ClickHouse-Server-Display-Name: i-mloy5trc
-< Transfer-Encoding: chunked
-< X-ClickHouse-Query-Id: 96fe0052-01e6-43ce-b12a-6b7370de6e8a
-< X-ClickHouse-Format: Template
-< X-ClickHouse-Timezone: Asia/Shanghai
-< Keep-Alive: timeout=3
-< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
-<
-# HELP "Query" "Number of executing queries"
-# TYPE "Query" counter
-"Query" 1
-
-# HELP "Merge" "Number of executing background merges"
-# TYPE "Merge" counter
-"Merge" 0
-
-# HELP "PartMutation" "Number of mutations (ALTER DELETE/UPDATE)"
-# TYPE "PartMutation" counter
-"PartMutation" 0
-
-# HELP "ReplicatedFetch" "Number of data parts being fetched from replica"
-# TYPE "ReplicatedFetch" counter
-"ReplicatedFetch" 0
-
-# HELP "ReplicatedSend" "Number of data parts being sent to replicas"
-# TYPE "ReplicatedSend" counter
-"ReplicatedSend" 0
-
-* Connection #0 to host localhost left intact
-
-
-* Connection #0 to host localhost left intact
-```
-
-As you can see from the example, `http_handlers` is configured in the config.xml file and can contain many `rule`s. ClickHouse matches received HTTP requests against the predefined types in `rule`, and the first matching rule runs the handler. Then ClickHouse executes the corresponding predefined query if the match is successful.
-
-> Now `rule` can configure `method`, `headers`, `url`, `handler`:
-> `method` is responsible for matching the method part of the HTTP request. `method` fully conforms to the definition of [method](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods) in the HTTP protocol. It is an optional configuration. If it is not defined in the configuration file, it does not match the method part of the HTTP request.
->
-> `url` is responsible for matching the URL part of the HTTP request. It is compatible with [RE2](https://github.com/google/re2) regular expressions. It is an optional configuration. If it is not defined in the configuration file, it does not match the URL part of the HTTP request.
->
-> `headers` is responsible for matching the header part of the HTTP request. It is compatible with RE2's regular expressions. It is an optional configuration. If it is not defined in the configuration file, it does not match the header part of the HTTP request.
->
-> `handler` contains the main processing part. Now `handler` can configure `type`, `status`, `content_type`, `response_content`, `query`, `query_param_name`.
-> \> `type` currently supports three types: **predefined_query_handler**, **dynamic_query_handler**, **static**.
-> \>
-> \> `query` - use with the predefined_query_handler type, executes the query when the handler is called.
-> \>
-> \> `query_param_name` - use with the dynamic_query_handler type, extracts and executes the value corresponding to the `query_param_name` value in the HTTP request parameters.
-> \>
-> \> `status` - use with the static type, response status code.
-> \>
-> \> `content_type` - use with the static type, response [content-type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type).
-> \>
-> \> `response_content` - use with the static type, response content sent to the client; when using the prefix 'file://' or 'config://', find the content from the file or configuration and send it to the client.
-
-Next are the configuration methods for the different `type`s.
-
-## predefined_query_handler {#predefined_query_handler}
-
-`predefined_query_handler` supports setting Settings and query_params values. You can configure `query` in the type of `predefined_query_handler`.
-
-The `query` value is a predefined query of `predefined_query_handler`, which is executed by ClickHouse when an HTTP request is matched and the result of the query is returned. It is a required configuration.
-
-The following example defines the values of the `max_threads` and `max_alter_threads` settings, then queries the system table to check whether these settings were set successfully.
-
-Example:
-
-``` xml
-<http_handlers>
-    <rule>
-        <url><![CDATA[/query_param_with_url/\w+/(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></url>
-        <method>GET</method>
-        <headers>
-            <XXX>TEST_HEADER_VALUE</XXX>
-            <PARAMS_XXX><![CDATA[(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></PARAMS_XXX>
-        </headers>
-        <handler>
-            <type>predefined_query_handler</type>
-            <query>SELECT value FROM system.settings WHERE name = {name_1:String}</query>
-            <query>SELECT name, value FROM system.settings WHERE name = {name_2:String}</query>
-        </handler>
-    </rule>
-</http_handlers>
-```
-
-``` bash
-$ curl -H 'XXX:TEST_HEADER_VALUE' -H 'PARAMS_XXX:max_threads' 'http://localhost:8123/query_param_with_url/1/max_threads/max_alter_threads?max_threads=1&max_alter_threads=2'
-1
-max_alter_threads 2
-```
-
-!!! note "caution"
-    One `predefined_query_handler` only supports one `query` of insert type.
-
-## dynamic_query_handler {#dynamic_query_handler}
-
-In `dynamic_query_handler`, the query is written in the form of a parameter of the HTTP request. The difference is that in `predefined_query_handler`, the query is written in the configuration file. You can configure `query_param_name` in `dynamic_query_handler`.
-
-ClickHouse extracts and executes the value corresponding to the `query_param_name` value in the URL of the HTTP request. The default value of `query_param_name` is `/query`. It is an optional configuration. If there is no definition in the configuration file, the parameter is not passed in.
-
-To experiment with this functionality, the example defines the values of max_threads and max_alter_threads and queries whether the settings were set successfully.
-
-Example:
-
-``` xml
-<http_handlers>
-    <rule>
-        <headers>
-            <XXX>TEST_HEADER_VALUE_DYNAMIC</XXX>
-        </headers>
-        <handler>
-            <type>dynamic_query_handler</type>
-            <query_param_name>query_param</query_param_name>
-        </handler>
-    </rule>
-</http_handlers>
-```
-
-``` bash
-$ curl -H 'XXX:TEST_HEADER_VALUE_DYNAMIC' 'http://localhost:8123/own?max_threads=1&max_alter_threads=2&param_name_1=max_threads&param_name_2=max_alter_threads&query_param=SELECT%20name,value%20FROM%20system.settings%20where%20name%20=%20%7Bname_1:String%7D%20OR%20name%20=%20%7Bname_2:String%7D'
-max_threads 1
-max_alter_threads 2
-```
-
-## static {#static}
-
-`static` can return [content_type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type), [status](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status) and response_content. response_content can return the specified content.
-
-Example:
-
-Return a message.
-
-``` xml
-<http_handlers>
-    <rule>
-        <method>GET</method>
-        <headers><XXX>xxx</XXX></headers>
-        <url>/hi</url>
-        <handler>
-            <type>static</type>
-            <status>402</status>
-            <content_type>text/html; charset=UTF-8</content_type>
-            <response_content>Say Hi!</response_content>
-        </handler>
-    </rule>
-</http_handlers>
-```
-
-``` bash
-$ curl -vv -H 'XXX:xxx' 'http://localhost:8123/hi'
-* Trying ::1...
-* Connected to localhost (::1) port 8123 (#0)
-> GET /hi HTTP/1.1
-> Host: localhost:8123
-> User-Agent: curl/7.47.0
-> Accept: */*
-> XXX:xxx
->
-< HTTP/1.1 402 Payment Required
-< Date: Wed, 29 Apr 2020 03:51:26 GMT
-< Connection: Keep-Alive
-< Content-Type: text/html; charset=UTF-8
-< Transfer-Encoding: chunked
-< Keep-Alive: timeout=3
-< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
-<
-* Connection #0 to host localhost left intact
-Say Hi!%
-```
-
-Find the content from the configuration sent to the client.
-
-``` xml
-<get_config_static_handler><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></get_config_static_handler>
-
-<http_handlers>
-    <rule>
-        <method>GET</method>
-        <headers><XXX>xxx</XXX></headers>
-        <url>/get_config_static_handler</url>
-        <handler>
-            <type>static</type>
-            <response_content>config://get_config_static_handler</response_content>
-        </handler>
-    </rule>
-</http_handlers>
-```
-
-``` bash
-$ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
-* Trying ::1...
-* Connected to localhost (::1) port 8123 (#0)
-> GET /get_config_static_handler HTTP/1.1
-> Host: localhost:8123
-> User-Agent: curl/7.47.0
-> Accept: */*
-> XXX:xxx
->
-< HTTP/1.1 200 OK
-< Date: Wed, 29 Apr 2020 04:01:24 GMT
-< Connection: Keep-Alive
-< Content-Type: text/plain; charset=UTF-8
-< Transfer-Encoding: chunked
-< Keep-Alive: timeout=3
-< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
-<
-* Connection #0 to host localhost left intact
-<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%
-```
-
-Find the content from the file sent to the client.
-
-``` xml
-<http_handlers>
-    <rule>
-        <method>GET</method>
-        <headers><XXX>xxx</XXX></headers>
-        <url>/get_absolute_path_static_handler</url>
-        <handler>
-            <type>static</type>
-            <content_type>text/html; charset=UTF-8</content_type>
-            <response_content>file:///absolute_path_file.html</response_content>
-        </handler>
-    </rule>
-    <rule>
-        <method>GET</method>
-        <headers><XXX>xxx</XXX></headers>
-        <url>/get_relative_path_static_handler</url>
-        <handler>
-            <type>static</type>
-            <content_type>text/html; charset=UTF-8</content_type>
-            <response_content>file://./relative_path_file.html</response_content>
-        </handler>
-    </rule>
-</http_handlers>
-```
-
-``` bash
-$ user_files_path='/var/lib/clickhouse/user_files'
-$ sudo echo "Relative Path File" > $user_files_path/relative_path_file.html
-$ sudo echo "Absolute Path File" > $user_files_path/absolute_path_file.html
-$ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler'
-* Trying ::1...
-* Connected to localhost (::1) port 8123 (#0)
-> GET /get_absolute_path_static_handler HTTP/1.1
-> Host: localhost:8123
-> User-Agent: curl/7.47.0
-> Accept: */*
-> XXX:xxx
->
-< HTTP/1.1 200 OK
-< Date: Wed, 29 Apr 2020 04:18:16 GMT
-< Connection: Keep-Alive
-< Content-Type: text/html; charset=UTF-8
-< Transfer-Encoding: chunked
-< Keep-Alive: timeout=3
-< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
-<
-Absolute Path File
-* Connection #0 to host localhost left intact
-$ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
-* Trying ::1...
-* Connected to localhost (::1) port 8123 (#0)
-> GET /get_relative_path_static_handler HTTP/1.1
-> Host: localhost:8123
-> User-Agent: curl/7.47.0
-> Accept: */*
-> XXX:xxx
->
-< HTTP/1.1 200 OK
-< Date: Wed, 29 Apr 2020 04:18:31 GMT
-< Connection: Keep-Alive
-< Content-Type: text/html; charset=UTF-8
-< Transfer-Encoding: chunked
-< Keep-Alive: timeout=3
-< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
-<
-Relative Path File
-* Connection #0 to host localhost left intact
-```
-
-[Original article](https://clickhouse.tech/docs/en/interfaces/http_interface/)
diff --git a/docs/es/interfaces/index.md b/docs/es/interfaces/index.md
deleted file mode 100644
index 3632c8a9e29..00000000000
--- a/docs/es/interfaces/index.md
+++ /dev/null
@@ -1,29 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_folder_title: Interfaz
-toc_priority: 14
-toc_title: "Implantaci\xF3n"
----
-
-# Interfaces {#interfaces}
-
-ClickHouse provides two network interfaces (both can be optionally wrapped in TLS for additional security):
-
-- [HTTP](http.md), which is documented and easy to use directly.
-- [Native TCP](tcp.md), which has less overhead.
-
-In most cases it is recommended to use an appropriate tool or library instead of interacting with those directly.
-The following are officially supported by Yandex:
-
-- [Command-line client](cli.md)
-- [JDBC driver](jdbc.md)
-- [ODBC driver](odbc.md)
-- [C++ client library](cpp.md)
-
-There is also a wide range of third-party libraries for working with ClickHouse:
-
-- [Client libraries](third-party/client-libraries.md)
-- [Integrations](third-party/integrations.md)
-- [Visual interfaces](third-party/gui.md)
-
-[Original article](https://clickhouse.tech/docs/en/interfaces/)
diff --git a/docs/es/interfaces/jdbc.md b/docs/es/interfaces/jdbc.md
deleted file mode 100644
index 7303dec8960..00000000000
--- a/docs/es/interfaces/jdbc.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 22
-toc_title: Controlador JDBC
----
-
-# JDBC Driver {#jdbc-driver}
-
-- **[Official driver](https://github.com/ClickHouse/clickhouse-jdbc)**
-- Third-party drivers:
-    - [ClickHouse-Native-JDBC](https://github.com/housepower/ClickHouse-Native-JDBC)
    - [clickhouse4j](https://github.com/blynkkk/clickhouse4j)
-
-[Original article](https://clickhouse.tech/docs/en/interfaces/jdbc/)
diff --git a/docs/es/interfaces/mysql.md b/docs/es/interfaces/mysql.md
deleted file mode 100644
index a5124c61dd5..00000000000
--- a/docs/es/interfaces/mysql.md
+++ /dev/null
@@ -1,49 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 20
-toc_title: Interfaz MySQL
----
-
-# MySQL Interface {#mysql-interface}
-
-ClickHouse supports the MySQL wire protocol. It can be enabled by the [mysql_port](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-mysql_port) setting in the configuration file:
-
-``` xml
-<mysql_port>9004</mysql_port>
-```
-
-Example of connecting using the command-line tool `mysql`:
-
-``` bash
-$ mysql --protocol tcp -u default -P 9004
-```
-
-Output if a connection succeeded:
-
-``` text
-Welcome to the MySQL monitor.  Commands end with ; or \g.
-Your MySQL connection id is 4
-Server version: 20.2.1.1-ClickHouse
-
-Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
-
-Oracle is a registered trademark of Oracle Corporation and/or its
-affiliates. Other names may be trademarks of their respective
-owners.
-
-Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
-
-mysql>
-```
-
-For compatibility with all MySQL clients, it is recommended to specify the user password with [double SHA1](../operations/settings/settings-users.md#password_double_sha1_hex) in the configuration file.
-If the user password is specified using [SHA256](../operations/settings/settings-users.md#password_sha256_hex), some clients won't be able to authenticate (mysqljs and old versions of the command-line tool mysql).
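-
-For illustration, a double-SHA1 hex digest can be produced with OpenSSL and placed into the user definition; this is only a sketch, and the password and the exact file layout are placeholders:
-
-``` bash
-$ echo -n 'my_password' | openssl dgst -sha1 -binary | openssl dgst -sha1
-```
-
-``` xml
-<users>
-    <default>
-        <password_double_sha1_hex>...digest from the command above...</password_double_sha1_hex>
-    </default>
-</users>
-```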
-
-Restrictions:
-
-- prepared queries are not supported
-
-- some data types are sent as strings
-
-[Original article](https://clickhouse.tech/docs/en/interfaces/mysql/)
diff --git a/docs/es/interfaces/odbc.md b/docs/es/interfaces/odbc.md
deleted file mode 100644
index 6ccb979c7f7..00000000000
--- a/docs/es/interfaces/odbc.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 23
-toc_title: Conductor ODBC
----
-
-# ODBC Driver {#odbc-driver}
-
-- [Official driver](https://github.com/ClickHouse/clickhouse-odbc).
-
-[Original article](https://clickhouse.tech/docs/en/interfaces/odbc/)
diff --git a/docs/es/interfaces/tcp.md b/docs/es/interfaces/tcp.md
deleted file mode 100644
index 47df0d12829..00000000000
--- a/docs/es/interfaces/tcp.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 18
-toc_title: Interfaz nativa (TCP)
----
-
-# Native Interface (TCP) {#native-interface-tcp}
-
-The native protocol is used in the [command-line client](cli.md), for inter-server communication during distributed query processing, and also in other C++ programs. Unfortunately, the native ClickHouse protocol does not yet have a formal specification, but it can be reverse-engineered from the ClickHouse source code (starting [around here](https://github.com/ClickHouse/ClickHouse/tree/master/src/Client)) and/or by intercepting and analyzing TCP traffic.
-
-[Original article](https://clickhouse.tech/docs/en/interfaces/tcp/)
diff --git a/docs/es/interfaces/third-party/client-libraries.md b/docs/es/interfaces/third-party/client-libraries.md
deleted file mode 100644
index b61ab1a5d9c..00000000000
--- a/docs/es/interfaces/third-party/client-libraries.md
+++ /dev/null
@@ -1,60 +0,0 @@
----
-toc_priority: 26
-toc_title: Client Libraries
----
-
-# Client Libraries from Third-party Developers {#client-libraries-from-third-party-developers}
-
-!!! warning "Disclaimer"
-    Yandex does **not** maintain the libraries listed below and hasn't done any extensive testing to ensure their quality.
- -- Python - - [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm) - - [clickhouse-driver](https://github.com/mymarilyn/clickhouse-driver) - - [clickhouse-client](https://github.com/yurial/clickhouse-client) - - [aiochclient](https://github.com/maximdanilchenko/aiochclient) - - [asynch](https://github.com/long2ice/asynch) -- PHP - - [smi2/phpclickhouse](https://packagist.org/packages/smi2/phpClickHouse) - - [8bitov/clickhouse-php-client](https://packagist.org/packages/8bitov/clickhouse-php-client) - - [bozerkins/clickhouse-client](https://packagist.org/packages/bozerkins/clickhouse-client) - - [simpod/clickhouse-client](https://packagist.org/packages/simpod/clickhouse-client) - - [seva-code/php-click-house-client](https://packagist.org/packages/seva-code/php-click-house-client) - - [SeasClick C++ client](https://github.com/SeasX/SeasClick) -- Go - - [clickhouse](https://github.com/kshvakov/clickhouse/) - - [go-clickhouse](https://github.com/roistat/go-clickhouse) - - [mailrugo-clickhouse](https://github.com/mailru/go-clickhouse) - - [golang-clickhouse](https://github.com/leprosus/golang-clickhouse) -- NodeJs - - [clickhouse (NodeJs)](https://github.com/TimonKK/clickhouse) - - [node-clickhouse](https://github.com/apla/node-clickhouse) -- Perl - - [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse) - - [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse) - - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse) -- Ruby - - [ClickHouse (Ruby)](https://github.com/shlima/click_house) - - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord) -- R - - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r) - - [RClickHouse](https://github.com/IMSMWU/RClickHouse) -- Java - - [clickhouse-client-java](https://github.com/VirtusAI/clickhouse-client-java) - - [clickhouse-client](https://github.com/Ecwid/clickhouse-client) -- Scala - - [clickhouse-scala-client](https://github.com/crobox/clickhouse-scala-client) -- Kotlin - - [AORM](https://github.com/TanVD/AORM) -- C# - - [Octonica.ClickHouseClient](https://github.com/Octonica/ClickHouseClient) - - [ClickHouse.Ado](https://github.com/killwort/ClickHouse-Net) - - [ClickHouse.Client](https://github.com/DarkWanderer/ClickHouse.Client) - - [ClickHouse.Net](https://github.com/ilyabreev/ClickHouse.Net) -- Elixir - - [clickhousex](https://github.com/appodeal/clickhousex/) - - [pillar](https://github.com/sofakingworld/pillar) -- Nim - - [nim-clickhouse](https://github.com/leonardoce/nim-clickhouse) - -[Original article](https://clickhouse.tech/docs/en/interfaces/third-party/client_libraries/) diff --git a/docs/es/interfaces/third-party/gui.md b/docs/es/interfaces/third-party/gui.md deleted file mode 100644 index 754c0f68c69..00000000000 --- a/docs/es/interfaces/third-party/gui.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 28 -toc_title: Interfaces Visuales ---- - -# Interfaces visuales de desarrolladores de terceros {#visual-interfaces-from-third-party-developers} - -## De código abierto {#open-source} - -### Tabix {#tabix} - -Interfaz web para ClickHouse en el [Tabix](https://github.com/tabixio/tabix) proyecto. - -Función: - -- Funciona con ClickHouse directamente desde el navegador, sin la necesidad de instalar software adicional. -- Editor de consultas con resaltado de sintaxis. -- Autocompletado de comandos. 
-- Herramientas para el análisis gráfico de la ejecución de consultas. -- Opciones de esquema de color. - -[Documentación de Tabix](https://tabix.io/doc/). - -### Sistema abierto {#houseops} - -[Sistema abierto.](https://github.com/HouseOps/HouseOps) Es una interfaz de usuario / IDE para OSX, Linux y Windows. - -Función: - -- Generador de consultas con resaltado de sintaxis. Ver la respuesta en una tabla o vista JSON. -- Exportar resultados de consultas como CSV o JSON. -- Lista de procesos con descripciones. Modo de escritura. Capacidad de parar (`KILL`) proceso. -- Gráfico de base de datos. Muestra todas las tablas y sus columnas con información adicional. -- Una vista rápida del tamaño de la columna. -- Configuración del servidor. - -Las siguientes características están planificadas para el desarrollo: - -- Gestión de bases de datos. -- Gestión de usuarios. -- Análisis de datos en tiempo real. -- Supervisión de clúster. -- Gestión de clústeres. -- Monitoreo de tablas replicadas y Kafka. - -### Faro {#lighthouse} - -[Faro](https://github.com/VKCOM/lighthouse) Es una interfaz web ligera para ClickHouse. - -Función: - -- Lista de tablas con filtrado y metadatos. -- Vista previa de la tabla con filtrado y clasificación. -- Ejecución de consultas de sólo lectura. - -### Redash {#redash} - -[Redash](https://github.com/getredash/redash) es una plataforma para la visualización de datos. - -Admite múltiples fuentes de datos, incluido ClickHouse, Redash puede unir los resultados de consultas de diferentes fuentes de datos en un conjunto de datos final. - -Función: - -- Potente editor de consultas. -- Explorador de base de datos. -- Herramientas de visualización, que le permiten representar datos en diferentes formas. - -### DBeaver {#dbeaver} - -[DBeaver](https://dbeaver.io/) - Cliente de base de datos de escritorio universal con soporte ClickHouse. - -Función: - -- Desarrollo de consultas con resaltado de sintaxis y autocompletado. -- Lista de tablas con filtros y búsqueda de metadatos. -- Vista previa de datos de tabla. -- Búsqueda de texto completo. - -### Sistema abierto {#clickhouse-cli} - -[Sistema abierto.](https://github.com/hatarist/clickhouse-cli) es un cliente de línea de comandos alternativo para ClickHouse, escrito en Python 3. - -Función: - -- Autocompletado. -- Resaltado de sintaxis para las consultas y la salida de datos. -- Soporte de buscapersonas para la salida de datos. -- Comandos similares a PostgreSQL personalizados. - -### Sistema abierto {#clickhouse-flamegraph} - -[Sistema abierto.](https://github.com/Slach/clickhouse-flamegraph) es una herramienta especializada para visualizar el `system.trace_log` como [Flamegraph](http://www.brendangregg.com/flamegraphs.html). - -### Bienvenidos al Portal de Licitación Electrónica de Licitación Electrónica {#clickhouse-plantuml} - -[Método de codificación de datos:](https://pypi.org/project/clickhouse-plantuml/) es un script para generar [PlantUML](https://plantuml.com/) diagrama de esquemas de tablas. - -## Comercial {#commercial} - -### DataGrip {#datagrip} - -[DataGrip](https://www.jetbrains.com/datagrip/) Es un IDE de base de datos de JetBrains con soporte dedicado para ClickHouse. También está integrado en otras herramientas basadas en IntelliJ: PyCharm, IntelliJ IDEA, GoLand, PhpStorm y otros. - -Función: - -- Finalización de código muy rápida. -- Resaltado de sintaxis de ClickHouse. -- Soporte para características específicas de ClickHouse, por ejemplo, columnas anidadas, motores de tablas. -- Editor de datos. 
-- Refactorizaciones. -- Búsqueda y navegación. - -### Yandex DataLens {#yandex-datalens} - -[Yandex DataLens](https://cloud.yandex.ru/services/datalens) es un servicio de visualización y análisis de datos. - -Función: - -- Amplia gama de visualizaciones disponibles, desde simples gráficos de barras hasta paneles complejos. -- Los paneles podrían ponerse a disposición del público. -- Soporte para múltiples fuentes de datos, incluyendo ClickHouse. -- Almacenamiento de datos materializados basados en ClickHouse. - -Nivel de Cifrado WEP [disponible de forma gratuita](https://cloud.yandex.com/docs/datalens/pricing) para proyectos de baja carga, incluso para uso comercial. - -- [Documentación de DataLens](https://cloud.yandex.com/docs/datalens/). -- [Tutorial](https://cloud.yandex.com/docs/solutions/datalens/data-from-ch-visualization) en la visualización de datos de una base de datos ClickHouse. - -### Software de Holística {#holistics-software} - -[Holística](https://www.holistics.io/) es una plataforma de datos de pila completa y una herramienta de inteligencia de negocios. - -Función: - -- Correo electrónico automatizado, Slack y horarios de informes de Google Sheet. -- Editor SQL con visualizaciones, control de versiones, autocompletado, componentes de consulta reutilizables y filtros dinámicos. -- Análisis integrado de informes y cuadros de mando a través de iframe. -- Preparación de datos y capacidades ETL. -- Soporte de modelado de datos SQL para mapeo relacional de datos. - -### Mirador {#looker} - -[Mirador](https://looker.com) Es una plataforma de datos y una herramienta de inteligencia de negocios con soporte para más de 50 dialectos de bases de datos, incluido ClickHouse. Bravo está disponible como una plataforma SaaS y auto-organizada. Los usuarios pueden utilizar Looker a través del navegador para explorar datos, crear visualizaciones y paneles, programar informes y compartir sus conocimientos con colegas. Looker proporciona un amplio conjunto de herramientas para incrustar estas características en otras aplicaciones y una API -para integrar datos con otras aplicaciones. - -Función: - -- Desarrollo fácil y ágil utilizando LookML, un lenguaje que soporta curado - [Modelado de datos](https://looker.com/platform/data-modeling) para apoyar a los redactores de informes y a los usuarios finales. -- Potente integración de flujo de trabajo a través de Looker's [Acciones de datos](https://looker.com/platform/actions). - -[Cómo configurar ClickHouse en Looker.](https://docs.looker.com/setup-and-management/database-config/clickhouse) - -[Artículo Original](https://clickhouse.tech/docs/en/interfaces/third-party/gui/) diff --git a/docs/es/interfaces/third-party/index.md b/docs/es/interfaces/third-party/index.md deleted file mode 100644 index adf50b05cdf..00000000000 --- a/docs/es/interfaces/third-party/index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_folder_title: tercero -toc_priority: 24 ---- - - diff --git a/docs/es/interfaces/third-party/integrations.md b/docs/es/interfaces/third-party/integrations.md deleted file mode 100644 index 7588bef0230..00000000000 --- a/docs/es/interfaces/third-party/integrations.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -toc_priority: 27 -toc_title: Integrations ---- - -# Integration Libraries from Third-party Developers {#integration-libraries-from-third-party-developers} - -!!! 
warning "Disclaimer" - Yandex does **not** maintain the tools and libraries listed below and haven’t done any extensive testing to ensure their quality. - -## Infrastructure Products {#infrastructure-products} - -- Relational database management systems - - [MySQL](https://www.mysql.com) - - [mysql2ch](https://github.com/long2ice/mysql2ch) - - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support) - - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader) - - [horgh-replicator](https://github.com/larsnovikov/horgh-replicator) - - [PostgreSQL](https://www.postgresql.org) - - [clickhousedb_fdw](https://github.com/Percona-Lab/clickhousedb_fdw) - - [infi.clickhouse_fdw](https://github.com/Infinidat/infi.clickhouse_fdw) (uses [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm)) - - [pg2ch](https://github.com/mkabilov/pg2ch) - - [clickhouse_fdw](https://github.com/adjust/clickhouse_fdw) - - [MSSQL](https://en.wikipedia.org/wiki/Microsoft_SQL_Server) - - [ClickHouseMigrator](https://github.com/zlzforever/ClickHouseMigrator) -- Message queues - - [Kafka](https://kafka.apache.org) - - [clickhouse_sinker](https://github.com/housepower/clickhouse_sinker) (uses [Go client](https://github.com/ClickHouse/clickhouse-go/)) - - [stream-loader-clickhouse](https://github.com/adform/stream-loader) -- Stream processing - - [Flink](https://flink.apache.org) - - [flink-clickhouse-sink](https://github.com/ivi-ru/flink-clickhouse-sink) -- Object storages - - [S3](https://en.wikipedia.org/wiki/Amazon_S3) - - [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup) -- Container orchestration - - [Kubernetes](https://kubernetes.io) - - [clickhouse-operator](https://github.com/Altinity/clickhouse-operator) -- Configuration management - - [puppet](https://puppet.com) - - [innogames/clickhouse](https://forge.puppet.com/innogames/clickhouse) - - [mfedotov/clickhouse](https://forge.puppet.com/mfedotov/clickhouse) -- Monitoring - - [Graphite](https://graphiteapp.org) - - [graphouse](https://github.com/yandex/graphouse) - - [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse) + - - [graphite-clickhouse](https://github.com/lomik/graphite-clickhouse) - - [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) - optimizes staled partitions in [\*GraphiteMergeTree](../../engines/table-engines/mergetree-family/graphitemergetree.md#graphitemergetree) if rules from [rollup configuration](../../engines/table-engines/mergetree-family/graphitemergetree.md#rollup-configuration) could be applied - - [Grafana](https://grafana.com/) - - [clickhouse-grafana](https://github.com/Vertamedia/clickhouse-grafana) - - [Prometheus](https://prometheus.io/) - - [clickhouse_exporter](https://github.com/f1yegor/clickhouse_exporter) - - [PromHouse](https://github.com/Percona-Lab/PromHouse) - - [clickhouse_exporter](https://github.com/hot-wifi/clickhouse_exporter) (uses [Go client](https://github.com/kshvakov/clickhouse/)) - - [Nagios](https://www.nagios.org/) - - [check_clickhouse](https://github.com/exogroup/check_clickhouse/) - - [check_clickhouse.py](https://github.com/innogames/igmonplugins/blob/master/src/check_clickhouse.py) - - [Zabbix](https://www.zabbix.com) - - [clickhouse-zabbix-template](https://github.com/Altinity/clickhouse-zabbix-template) - - [Sematext](https://sematext.com/) - - [clickhouse integration](https://github.com/sematext/sematext-agent-integrations/tree/master/clickhouse) -- Logging - - 
[rsyslog](https://www.rsyslog.com/) - - [omclickhouse](https://www.rsyslog.com/doc/master/configuration/modules/omclickhouse.html) - - [fluentd](https://www.fluentd.org) - - [loghouse](https://github.com/flant/loghouse) (for [Kubernetes](https://kubernetes.io)) - - [logagent](https://www.sematext.com/logagent) - - [logagent output-plugin-clickhouse](https://sematext.com/docs/logagent/output-plugin-clickhouse/) -- Geo - - [MaxMind](https://dev.maxmind.com/geoip/) - - [clickhouse-maxmind-geoip](https://github.com/AlexeyKupershtokh/clickhouse-maxmind-geoip) - -## Programming Language Ecosystems {#programming-language-ecosystems} - -- Python - - [SQLAlchemy](https://www.sqlalchemy.org) - - [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) (uses [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm)) - - [pandas](https://pandas.pydata.org) - - [pandahouse](https://github.com/kszucs/pandahouse) -- PHP - - [Doctrine](https://www.doctrine-project.org/) - - [dbal-clickhouse](https://packagist.org/packages/friendsofdoctrine/dbal-clickhouse) -- R - - [dplyr](https://db.rstudio.com/dplyr/) - - [RClickHouse](https://github.com/IMSMWU/RClickHouse) (uses [clickhouse-cpp](https://github.com/artpaul/clickhouse-cpp)) -- Java - - [Hadoop](http://hadoop.apache.org) - - [clickhouse-hdfs-loader](https://github.com/jaykelin/clickhouse-hdfs-loader) (uses [JDBC](../../sql-reference/table-functions/jdbc.md)) -- Scala - - [Akka](https://akka.io) - - [clickhouse-scala-client](https://github.com/crobox/clickhouse-scala-client) -- C# - - [ADO.NET](https://docs.microsoft.com/en-us/dotnet/framework/data/adonet/ado-net-overview) - - [ClickHouse.Ado](https://github.com/killwort/ClickHouse-Net) - - [ClickHouse.Client](https://github.com/DarkWanderer/ClickHouse.Client) - - [ClickHouse.Net](https://github.com/ilyabreev/ClickHouse.Net) - - [ClickHouse.Net.Migrations](https://github.com/ilyabreev/ClickHouse.Net.Migrations) -- Elixir - - [Ecto](https://github.com/elixir-ecto/ecto) - - [clickhouse_ecto](https://github.com/appodeal/clickhouse_ecto) -- Ruby - - [Ruby on Rails](https://rubyonrails.org/) - - [activecube](https://github.com/bitquery/activecube) - - [ActiveRecord](https://github.com/PNixx/clickhouse-activerecord) - - [GraphQL](https://github.com/graphql) - - [activecube-graphql](https://github.com/bitquery/activecube-graphql) - -[Original article](https://clickhouse.tech/docs/en/interfaces/third-party/integrations/) diff --git a/docs/es/interfaces/third-party/proxy.md b/docs/es/interfaces/third-party/proxy.md deleted file mode 100644 index e1aabf8fce4..00000000000 --- a/docs/es/interfaces/third-party/proxy.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 29 -toc_title: Proxy ---- - -# Servidores proxy de desarrolladores de terceros {#proxy-servers-from-third-party-developers} - -## chproxy {#chproxy} - -[chproxy](https://github.com/Vertamedia/chproxy), es un proxy HTTP y equilibrador de carga para la base de datos ClickHouse. - -Función: - -- Enrutamiento por usuario y almacenamiento en caché de respuestas. -- Flexible límites. -- Renovación automática del certificado SSL. - -Implementado en Go. 
-
-## KittenHouse {#kittenhouse}
-
-[KittenHouse](https://github.com/VKCOM/kittenhouse) is designed to be a local proxy between ClickHouse and the application server in case it's impossible or inconvenient to buffer INSERT data on the application side.
-
-Features:
-
-- In-memory and on-disk data buffering.
-- Per-table routing.
-- Load balancing and health checking.
-
-Implemented in Go.
-
-## ClickHouse-Bulk {#clickhouse-bulk}
-
-[ClickHouse-Bulk](https://github.com/nikepan/clickhouse-bulk) is a simple ClickHouse insert collector.
-
-Features:
-
-- Group requests and send them by threshold or interval.
-- Multiple remote servers.
-- Basic authentication.
-
-Implemented in Go.
-
-[Original article](https://clickhouse.tech/docs/en/interfaces/third-party/proxy/)
diff --git a/docs/es/introduction/adopters.md b/docs/es/introduction/adopters.md
deleted file mode 100644
index 4c0aa78d57b..00000000000
--- a/docs/es/introduction/adopters.md
+++ /dev/null
@@ -1,86 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 8
-toc_title: Adoptante
----
-
-# ClickHouse Adopters {#clickhouse-adopters}
-
-!!! warning "Disclaimer"
-    The following list of companies using ClickHouse and their success stories is assembled from public sources, and thus might differ from current reality. We'd appreciate it if you shared the story of adopting ClickHouse in your company and [added it to the list](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/introduction/adopters.md), but please make sure you won't have any NDA issues by doing so. Providing updates with publications from other companies is also useful.
-
-| Company | Industry | Usecase | Cluster Size | (Un)Compressed Data Size\* | Reference |
-|---------|----------|---------|--------------|----------------------------|-----------|
-| 2gis | Maps | Monitoring | — | — | [Talk in Russian, July 2019](https://youtu.be/58sPkXfq6nw) |
-| Aloha Browser | Mobile App | Browser backend | — | — | [Slides in Russian, May 2019](https://github.com/yandex/clickhouse-presentations/blob/master/meetup22/aloha.pdf) |
-| Amadeus | Travel | Analytics | — | — | [Press Release, April 2018](https://www.altinity.com/blog/2018/4/5/amadeus-technologies-launches-investment-and-insights-tool-based-on-machine-learning-and-strategy-algorithms) |
-| Appsflyer | Mobile analytics | Main product | — | — | [Talk in Russian, July 2019](https://www.youtube.com/watch?v=M3wbRlcpBbY) |
-| ArenaData | Data Platform | Main product | — | — | [Slides in Russian, December 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/indexes.pdf) |
-| Badoo | Dating | Timeseries | — | — | [Slides in Russian, December 2019](https://presentations.clickhouse.tech/meetup38/forecast.pdf) |
-| Benocs | Network Telemetry and Analytics | Main Product | — | — | [Slides in English, October 2017](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup9/lpm.pdf) |
-| Bloomberg | Finance, Media | Monitoring | 102 servers | — | [Slides, May 2018](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) |
-| Bloxy | Blockchain | Analytics | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/4_bloxy.pptx) |
-| Dataliance for China Telecom | Telecom | Analytics | — | — | [Slides in Chinese, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) |
-| CARTO | Business Intelligence | Geo analytics | — | — | [Geospatial processing with ClickHouse](https://carto.com/blog/geospatial-processing-with-clickhouse/) |
-| CERN | Research | Experiment | — | — | [Press release, April 2012](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) |
-| Cisco | Networking | Traffic analysis | — | — | [Lightning talk, October 2019](https://youtu.be/-hI1vDR2oPY?t=5057) |
-| Citadel Securities | Finance | — | — | — | [Contribution, March 2019](https://github.com/ClickHouse/ClickHouse/pull/4774) |
-| Citymobil | Taxi | Analytics | — | — | [Blog Post in Russian, March 2020](https://habr.com/en/company/citymobil/blog/490660/) |
-| ContentSquare | Web analytics | Main product | — | — | [Blog post in French, November 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) |
-| Cloudflare | CDN | Traffic analysis | 36 servers | — | [Blog post, May 2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [Blog post, March 2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) |
-| Corunet | Analytics | Main product | — | — | [Slides in English, April 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) |
-| CraiditX 氪信 | Finance AI | Analysis | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) |
-| Criteo | Retail | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/3_storetail.pptx) |
-| Deutsche Bank | Finance | BI Analytics | — | — | [Slides in English, October 2019](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) |
-| Diva-e | Digital consulting | Main Product | — | — | [Slides in English, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) |
-| Exness | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) |
-| Geniee | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) |
-| HUYA | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) |
-| Idealista | Real Estate | Analytics | — | — | [Blog Post in English, April 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
-| Infovista | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) |
-| InnoGames | Games | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) |
-| Integros | Platform for video services | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |
-| Kodiak Data | Clouds | Main product | — | — | [Slides in English, April 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) |
-| Kontur | Software Development | Metrics | — | — | [Talk in Russian, November 2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) |
-| LifeStreet | Ad network | Main product | 75 servers (3 replicas) | 5.27 PiB | [Blog post in Russian, February 2017](https://habr.com/en/post/322620/) |
-| Mail.ru Cloud Solutions | Cloud services | Main product | — | — | [Article in Russian](https://mcs.mail.ru/help/db-create/clickhouse#) |
-| MessageBird | Telecommunications | Statistics | — | — | [Slides in English, November 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) |
-| MGID | Ad network | Web-analytics | — | — | [Blog post in Russian, April 2020](http://gs-studio.com/news-about-it/32777----clickhouse---c) |
-| OneAPM | Monitoring and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) |
-| Pragma Innovation | Telemetry and Big Data Analysis | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/4_pragma_innovation.pdf) |
-| QINGCLOUD | Cloud services | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/4.%20Cloud%20%2B%20TSDB%20for%20ClickHouse%20张健%20QingCloud.pdf) |
-| Qrator | DDoS protection | Main product | — | — | [Blog Post, March 2019](https://blog.qrator.net/en/clickhouse-ddos-mitigation_37/) |
-| Percent 百分点 | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) |
-| Rambler | Internet services | Analytics | — | — | [Talk in Russian, April 2018](https://medium.com/@ramblertop/разработка-api-clickhouse-для-рамблер-топ-100-f4c7e56f3141) |
-| Tencent | Messaging | Logging | — | — | [Talk in Chinese, November 2019](https://youtu.be/T-iVQRuw-QY?t=5050) |
-| Traffic Stars | AD network | — | — | — | [Slides in Russian, May 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) |
-| S7 Airlines | Airlines | Metrics, Logging | — | — | [Talk in Russian, March 2019](https://www.youtube.com/watch?v=nwG68klRpPg&t=15s) |
-| SEMrush | Marketing | Main product | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) |
-| scireum GmbH | e-Commerce | Main product | — | — | [Talk in German, February 2020](https://www.youtube.com/watch?v=7QWAn5RbyR4) |
-| Sentry | Software developer | Backend for product | — | — | [Blog Post in English, May 2019](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) |
-| SGK | Government Social Security | Analytics | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) |
-| seo.do | Analytics | Main product | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) |
-| Sina | News | — | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) |
-| SMI2 | News | Analytics | — | — | [Blog Post in Russian, November 2017](https://habr.com/ru/company/smi2/blog/314558/) |
-| Splunk | Business Analytics | Main product | — | — | [Slides in English, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) |
-| Spotify | Music | Experimentation | — | — | [Slides, July 2018](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) |
-| Tencent | Big Data | Data processing | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) |
-| Uber | Taxi | Logging | — | — | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/uber.pdf) |
-| VKontakte | Social Network | Statistics, Logging | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/3_vk.pdf) |
-| Wisebits | IT Solutions | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |
-| Xiaoxin Tech | Education | Common purpose | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/sync-clickhouse-with-mysql-mongodb.pptx) |
-| Ximalaya | Audio sharing | OLAP | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/ximalaya.pdf) |
-| Yandex Cloud | Public Cloud | Main product | — | — | [Talk in Russian, December 2019](https://www.youtube.com/watch?v=pgnak9e_E0o) |
-| Yandex DataLens | Business Intelligence | Main product | — | — | [Slides in Russian, December 2019](https://presentations.clickhouse.tech/meetup38/datalens.pdf) |
-| Yandex Market | e-Commerce | Metrics, Logging | — | — | [Talk in Russian, January 2019](https://youtu.be/_l1qP0DyBcA?t=478) |
-| Yandex Metrica | Web analytics | Main product | 360 servers in one cluster, 1862 servers in one department | 66.41 PiB / 5.68 PiB | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/introduction/#13) |
-| ЦВТ | Software Development | Metrics, Logging | — | — | [Blog Post, March 2019, in Russian](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) |
-| МКБ | Bank | Web-system monitoring | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) |
-| Jinshuju 金数据 | BI Analytics | Main product | — | — | [Slides in Chinese, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) |
-| Instana | APM Platform | Main product | — | — | [Twitter post](https://twitter.com/mieldonkers/status/1248884119158882304) |
-| Wargaming | Games | | — | — | [Interview](https://habr.com/en/post/496954/) |
-| Crazypanda | Games | | — | — | Live session on ClickHouse meetup |
-| FunCorp | Games | | — | — | [Article](https://www.altinity.com/blog/migrating-from-redshift-to-clickhouse) |
-
-[Original article](https://clickhouse.tech/docs/en/introduction/adopters/)
diff --git a/docs/es/introduction/distinctive-features.md b/docs/es/introduction/distinctive-features.md
deleted file mode 100644
index 154b12a65e9..00000000000
--- a/docs/es/introduction/distinctive-features.md
+++ /dev/null
@@ -1,77 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 4
-toc_title: "Distinctive Features"
----
-
-# Distinctive Features of ClickHouse {#distinctive-features-of-clickhouse}
-
-## True Column-Oriented DBMS {#true-column-oriented-dbms}
-
-In a true column-oriented DBMS, no extra data is stored with the values. Among other things, this means that constant-length values must be supported, to avoid storing their length “number” next to the values. As an example, a billion UInt8-type values should consume around 1 GB uncompressed, or this strongly affects the CPU use. It is essential to store data compactly (without any “garbage”) even when uncompressed, since the speed of decompression (CPU usage) depends mainly on the volume of uncompressed data.
-
-It is worth noting because there are systems that can store values of different columns separately, but that can't effectively process analytical queries due to their optimization for other scenarios. Examples are HBase, BigTable, Cassandra, and HyperTable. In these systems, you would get throughput around a hundred thousand rows per second, but not hundreds of millions of rows per second.
-
-It's also worth noting that ClickHouse is a database management system, not a single database. ClickHouse allows creating tables and databases in runtime, loading data, and running queries without reconfiguring and restarting the server.
-
-## Data Compression {#data-compression}
-
-Some column-oriented DBMSs (InfiniDB CE and MonetDB) do not use data compression. However, data compression plays a key role in achieving excellent performance.
-
-## Disk Storage of Data {#disk-storage-of-data}
-
-Keeping data physically sorted by primary key makes it possible to extract data for its specific values or value ranges with low latency, less than a few dozen milliseconds. Some column-oriented DBMSs (such as SAP HANA and Google PowerDrill) can only work in RAM. This approach encourages the allocation of a larger hardware budget than is necessary for real-time analysis. ClickHouse is designed to work on regular hard drives, which means the cost per GB of data storage is low, but SSD and additional RAM are also fully used if available.
-
-## Parallel Processing on Multiple Cores {#parallel-processing-on-multiple-cores}
-
-Large queries are parallelized naturally, taking all the necessary resources available on the current server.
-
-## Distributed Processing on Multiple Servers {#distributed-processing-on-multiple-servers}
-
-Almost none of the columnar DBMSs mentioned above have support for distributed query processing.
-In ClickHouse, data can reside on different shards. Each shard can be a group of replicas used for fault tolerance. All shards are used to run a query in parallel, transparently for the user.
-
-## SQL Support {#sql-support}
-
-ClickHouse supports a declarative query language based on SQL that is identical to the SQL standard in many cases.
-Supported queries include GROUP BY, ORDER BY, subqueries in FROM, IN, and JOIN clauses, and scalar subqueries.
-Dependent subqueries and window functions are not supported.
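The deleted page above describes the supported SQL constructs only in prose; a minimal illustrative query may help. The table and column names here are hypothetical, not from the original document:

``` sql
-- GROUP BY, ORDER BY, and an IN subquery combined in one query
SELECT
    UserID,
    count() AS visits
FROM hits
WHERE CounterID IN (SELECT CounterID FROM counters WHERE active = 1)
GROUP BY UserID
ORDER BY visits DESC
LIMIT 10
```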
-
-## Vector Engine {#vector-engine}
-
-Data is not only stored by columns but is processed by vectors (parts of columns), which allows achieving high CPU efficiency.
-
-## Real-time Data Updates {#real-time-data-updates}
-
-ClickHouse supports tables with a primary key. To quickly perform queries on the range of the primary key, the data is sorted incrementally using the merge tree. Due to this, data can continually be added to the table. No locks are taken when new data is ingested.
-
-## Index {#index}
-
-Having data physically sorted by primary key makes it possible to extract data for its specific values or value ranges with low latency, less than a few dozen milliseconds.
-
-## Suitable for Online Queries {#suitable-for-online-queries}
-
-Low latency means that queries can be processed without delay and without trying to prepare an answer in advance, right at the same moment while the user interface page is loading. In other words, online.
-
-## Support for Approximated Calculations {#support-for-approximated-calculations}
-
-ClickHouse provides various ways to trade accuracy for performance:
-
-1. Aggregate functions for approximated calculation of the number of distinct values, medians, and quantiles.
-2. Running a query based on a part (sample) of data and getting an approximated result. In this case, proportionally less data is retrieved from the disk.
-3. Running an aggregation for a limited number of random keys, instead of for all keys. Under certain conditions for key distribution in the data, this provides a reasonably accurate result while using fewer resources.
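As a rough sketch of the first two accuracy-for-performance trade-offs in the list above (the table name is hypothetical, and `SAMPLE` additionally requires the table to be created with a sampling key):

``` sql
-- Approximate distinct count instead of the exact count(DISTINCT ...)
SELECT uniq(UserID) FROM hits;

-- Run the query on roughly 10% of the data
SELECT count() FROM hits SAMPLE 0.1;
```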
-
-## Data Replication and Data Integrity Support {#data-replication-and-data-integrity-support}
-
-ClickHouse uses asynchronous multi-master replication. After being written to any available replica, all the remaining replicas retrieve their copy in the background. The system maintains identical data on different replicas. Recovery after most failures is performed automatically, or semi-automatically in complex cases.
-
-For more information, see the section [Data replication](../engines/table-engines/mergetree-family/replication.md).
-
-## ClickHouse Features that Can Be Considered Disadvantages {#clickhouse-features-that-can-be-considered-disadvantages}
-
-1. No full-fledged transactions.
-2. Lack of ability to modify or delete already inserted data with a high rate and low latency. There are batch deletes and updates available to clean up or modify data, for example, to comply with [GDPR](https://gdpr-info.eu).
-3. The sparse index makes ClickHouse not so suitable for point queries retrieving single rows by their keys.
-
-[Original article](https://clickhouse.tech/docs/en/introduction/distinctive_features/)
diff --git a/docs/es/introduction/history.md b/docs/es/introduction/history.md
deleted file mode 100644
index 7311fa01959..00000000000
--- a/docs/es/introduction/history.md
+++ /dev/null
@@ -1,56 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 7
-toc_title: History
----
-
-# ClickHouse History {#clickhouse-history}
-
-ClickHouse has been developed initially to power [Yandex.Metrica](https://metrica.yandex.com/), [the second largest web analytics platform in the world](http://w3techs.com/technologies/overview/traffic_analysis/all), and continues to be the core component of this system. With more than 13 trillion records in the database and more than 20 billion events daily, ClickHouse allows generating custom reports on the fly directly from non-aggregated data. This article briefly covers the goals of ClickHouse in the early stages of its development.
-
-Yandex.Metrica builds customized reports on the fly based on hits and sessions, with arbitrary segments defined by the user. Doing so often requires building complex aggregates, such as the number of unique users. New data for building a report arrives in real-time.
-
-As of April 2014, Yandex.Metrica was tracking about 12 billion events (page views and clicks) daily. All these events must be stored to build custom reports. A single query may require scanning millions of rows within a few hundred milliseconds, or hundreds of millions of rows in just a few seconds.
-
-## Usage in Yandex.Metrica and Other Yandex Services {#usage-in-yandex-metrica-and-other-yandex-services}
-
-ClickHouse serves multiple purposes in Yandex.Metrica.
-Its main task is to build reports in online mode using non-aggregated data. It uses a cluster of 374 servers, which store over 20.3 trillion rows in the database. The volume of compressed data is about 2 PB, without accounting for duplicates and replicas. The volume of uncompressed data (in TSV format) would be approximately 17 PB.
-
-ClickHouse also plays a key role in the following processes:
-
-- Storing data for Session Replay from Yandex.Metrica.
-- Processing intermediate data.
-- Building global reports with Analytics.
-- Running queries for debugging the Yandex.Metrica engine.
-- Analyzing logs from the API and the user interface.
-
-Nowadays, there are multiple dozen ClickHouse installations in other Yandex services and departments: search verticals, e-commerce, advertisement, business analytics, mobile development, personal services, and others.
-
-## Aggregated and Non-aggregated Data {#aggregated-and-non-aggregated-data}
-
-There is a widespread opinion that to calculate statistics effectively, you must aggregate data since this reduces the volume of data.
-
-But data aggregation comes with a lot of limitations:
-
-- You must have a pre-defined list of required reports.
-- The user can't make custom reports.
-- When aggregating over a large number of distinct keys, the volume of data is hardly reduced, so aggregation is useless.
-- For a large number of reports, there are too many aggregation variations (combinatorial explosion).
-- When aggregating keys with high cardinality (such as URLs), the volume of data is not reduced by much (less than twofold).
-- For this reason, the volume of data with aggregation might grow instead of shrink.
-- Users do not view all the reports we generate for them. A large portion of those calculations is useless.
-- The logical integrity of data may be violated for various aggregations.
-
-If we do not aggregate anything and work with non-aggregated data, this might reduce the volume of calculations.
-
-However, with aggregation, a significant part of the work is taken offline and completed relatively calmly. In contrast, online calculations require calculating as fast as possible, since the user is waiting for the result.
-
-Yandex.Metrica has a specialized system for aggregating data called Metrage, which was used for the majority of reports.
-Starting in 2009, Yandex.Metrica also used a specialized OLAP database for non-aggregated data called OLAPServer, which was previously used for the report builder.
-OLAPServer worked well for non-aggregated data, but it had many restrictions that did not allow it to be used for all reports as desired. These included the lack of support for data types (only numbers), and the inability to incrementally update data in real-time (it could only be done by rewriting data daily). OLAPServer is not a DBMS, but a specialized DB.
-
-The initial goal for ClickHouse was to remove the limitations of OLAPServer and solve the problem of working with non-aggregated data for all reports, but over the years, it has grown into a general-purpose database management system suitable for a wide range of analytical tasks.
-
-[Original article](https://clickhouse.tech/docs/en/introduction/history/)
diff --git a/docs/es/introduction/index.md b/docs/es/introduction/index.md
deleted file mode 100644
index 7026dc800e4..00000000000
--- a/docs/es/introduction/index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_folder_title: "Introduction"
-toc_priority: 1
----
-
-
diff --git a/docs/es/introduction/performance.md b/docs/es/introduction/performance.md
deleted file mode 100644
index 01640439128..00000000000
--- a/docs/es/introduction/performance.md
+++ /dev/null
@@ -1,32 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 6
-toc_title: Performance
----
-
-# Performance {#performance}
-
-According to internal testing results at Yandex, ClickHouse shows the best performance (both the highest throughput for long queries and the lowest latency on short queries) for comparable operating scenarios among systems of its class that were available for testing. You can view the test results on a [separate page](https://clickhouse.tech/benchmark/dbms/).
-
-Numerous independent benchmarks came to similar conclusions. They are not difficult to find using an internet search, or you can see [our small collection of related links](https://clickhouse.tech/#independent-benchmarks).
-
-## Throughput for a Single Large Query {#throughput-for-a-single-large-query}
-
-Throughput can be measured in rows per second or megabytes per second. If the data is placed in the page cache, a query that is not too complex is processed on modern hardware at a speed of approximately 2-10 GB/s of uncompressed data on a single server (for the most straightforward cases, the speed may reach 30 GB/s). If data is not placed in the page cache, the speed depends on the disk subsystem and the data compression rate. For example, if the disk subsystem allows reading data at 400 MB/s and the data compression rate is 3, the speed is expected to be around 1.2 GB/s. To get the speed in rows per second, divide the speed in bytes per second by the total size of the columns used in the query. For example, if 10 bytes of columns are extracted, the speed is expected to be around 100-200 million rows per second.
-
-The processing speed increases almost linearly for distributed processing, but only if the number of rows resulting from aggregation or sorting is not too large.
-
-## Latency When Processing Short Queries {#latency-when-processing-short-queries}
-
-If a query uses a primary key and does not select too many columns and rows to process (hundreds of thousands), you can expect less than 50 milliseconds of latency (single digits of milliseconds in the best case) if data is placed in the page cache. Otherwise, latency is mostly dominated by the number of seeks. If you use rotating disk drives, for a system that is not overloaded, the latency can be estimated with this formula: `seek time (10 ms) * count of columns queried * count of data parts`.
-
-## Throughput When Processing a Large Quantity of Short Queries {#throughput-when-processing-a-large-quantity-of-short-queries}
-
-Under the same conditions, ClickHouse can handle several hundred queries per second on a single server (up to several thousand in the best case). Since this scenario is not typical for analytical DBMSs, we recommend expecting a maximum of 100 queries per second.
-
-## Performance When Inserting Data {#performance-when-inserting-data}
-
-We recommend inserting data in packets of at least 1000 rows, or no more than a single request per second. When inserting into a MergeTree table from a tab-separated dump, the insertion speed can be from 50 to 200 MB/s. If the inserted rows are around 1 KB in size, the speed will be from 50,000 to 200,000 rows per second. If the rows are small, the performance can be higher in rows per second (on Banner System data -`>` 500,000 rows per second; on Graphite data -`>` 1,000,000 rows per second). To improve performance, you can make multiple INSERT queries in parallel, which scales linearly.
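A minimal sketch of the batching advice above; the table, columns, and values are hypothetical, not from the original document:

``` sql
-- One batched INSERT of many rows is far cheaper than many single-row INSERTs;
-- in practice each batch should contain at least ~1000 rows.
INSERT INTO events (event_date, user_id, value) VALUES
    ('2021-05-20', 1, 10.5),
    ('2021-05-20', 2, 11.0),
    ('2021-05-20', 3, 7.25);
```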
-
-[Original article](https://clickhouse.tech/docs/en/introduction/performance/)
diff --git a/docs/es/operations/access-rights.md b/docs/es/operations/access-rights.md
deleted file mode 100644
index 6c777d9f081..00000000000
--- a/docs/es/operations/access-rights.md
+++ /dev/null
@@ -1,143 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 48
-toc_title: "Access Control and Account Management"
----
-
-# Access Control and Account Management {#access-control}
-
-ClickHouse supports access control management based on the [RBAC](https://en.wikipedia.org/wiki/Role-based_access_control) approach.
-
-ClickHouse access entities:
-- [User account](#user-account-management)
-- [Role](#role-management)
-- [Row policy](#row-policy-management)
-- [Settings profile](#settings-profiles-management)
-- [Quota](#quotas-management)
-
-You can configure access entities using:
-
-- SQL-driven workflow.
-
-    You need to [enable](#enabling-access-control) this functionality.
-
-- Server [configuration files](configuration-files.md) `users.xml` and `config.xml`.
-
-We recommend using the SQL-driven workflow. Both of the configuration methods work simultaneously, so if you use the server configuration files for managing accounts and access rights, you can smoothly switch to the SQL-driven workflow.
-
-!!! note "Warning"
-    You can't manage the same access entity by both configuration methods simultaneously.
-
-## Usage {#access-control-usage}
-
-By default, the ClickHouse server provides the `default` user account which is not allowed to use SQL-driven access control and account management but has all the rights and permissions. The `default` user account is used in any case when the username is not defined, for example, at login from the client or in distributed queries. In distributed query processing a default user account is used if the configuration of the server or cluster doesn't specify the [user and password](../engines/table-engines/special/distributed.md) properties.
-
-If you just started using ClickHouse, you can use the following scenario:
-
-1. [Enable](#enabling-access-control) SQL-driven access control and account management for the `default` user.
-2. Log in under the `default` user account and create all the required users. Don't forget to create an administrator account (`GRANT ALL ON *.* WITH GRANT OPTION TO admin_user_account`).
-3. [Restrict permissions](settings/permissions-for-queries.md#permissions_for_queries) for the `default` user and disable SQL-driven access control and account management for it.
-
-### Properties of Current Solution {#access-control-properties}
-
-- You can grant permissions for databases and tables even if they do not exist.
-- If a table was deleted, all the privileges that correspond to this table are not revoked. So, if a new table is created later with the same name, all the privileges become actual again. To revoke privileges corresponding to the deleted table, you need to perform, for example, the `REVOKE ALL PRIVILEGES ON db.table FROM ALL` query.
-- There are no lifetime settings for privileges.
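A minimal sketch of the bootstrap scenario above, with a hypothetical account name and password. The `GRANT` spelling here follows the `GRANT ... TO ... WITH GRANT OPTION` form of the SQL reference, which differs slightly from the shorthand quoted in step 2:

``` sql
-- Step 2: create an administrator account while logged in as `default`
CREATE USER admin_user_account IDENTIFIED WITH sha256_password BY 'choose-a-strong-password';
GRANT ALL ON *.* TO admin_user_account WITH GRANT OPTION;
```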
-
-## User Account {#user-account-management}
-
-A user account is an access entity that allows to authorize someone in ClickHouse. A user account contains:
-
-- Identification information.
-- [Privileges](../sql-reference/statements/grant.md#grant-privileges) that define a scope of queries the user can perform.
-- Hosts from which connection to the ClickHouse server is allowed.
-- Granted and default roles.
-- Settings with their constraints that apply by default at the user's login.
-- Assigned settings profiles.
-
-Privileges to a user account can be granted by the [GRANT](../sql-reference/statements/grant.md) query or by assigning [roles](#role-management). To revoke privileges from a user, ClickHouse provides the [REVOKE](../sql-reference/statements/revoke.md) query. To list privileges for a user, use the [SHOW GRANTS](../sql-reference/statements/show.md#show-grants-statement) statement.
-
-Management queries:
-
-- [CREATE USER](../sql-reference/statements/create.md#create-user-statement)
-- [ALTER USER](../sql-reference/statements/alter.md#alter-user-statement)
-- [DROP USER](../sql-reference/statements/misc.md#drop-user-statement)
-- [SHOW CREATE USER](../sql-reference/statements/show.md#show-create-user-statement)
-
-### Settings Applying {#access-control-settings-applying}
-
-Settings can be set in different ways: for a user account, in its granted roles, and in settings profiles. At a user login, if a setting is set in different access entities, the value and constraints of this setting are applied by the following priorities (from higher to lower):
-
-1. User account setting.
-2. The settings of default roles of the user account. If a setting is set in some roles, then the order of the setting application is undefined.
-3. The settings in settings profiles assigned to a user or to its default roles. If a setting is set in some profiles, then the order of the setting application is undefined.
-4. Settings applied to all the server by default or from the [default profile](server-configuration-parameters/settings.md#default-profile).
-
-## Role {#role-management}
-
-Role is a container for the access entities that can be granted to a user account.
-
-Role contains:
-
-- [Privileges](../sql-reference/statements/grant.md#grant-privileges)
-- Settings and constraints
-- List of granted roles
-
-Management queries:
-
-- [CREATE ROLE](../sql-reference/statements/create.md#create-role-statement)
-- [ALTER ROLE](../sql-reference/statements/alter.md#alter-role-statement)
-- [DROP ROLE](../sql-reference/statements/misc.md#drop-role-statement)
-- [SET ROLE](../sql-reference/statements/misc.md#set-role-statement)
-- [SET DEFAULT ROLE](../sql-reference/statements/misc.md#set-default-role-statement)
-- [SHOW CREATE ROLE](../sql-reference/statements/show.md#show-create-role-statement)
-
-Privileges to a role can be granted by the [GRANT](../sql-reference/statements/grant.md) query. To revoke privileges from a role, ClickHouse provides the [REVOKE](../sql-reference/statements/revoke.md) query.
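A short sketch of the role workflow described above; the role name, user name, and database are hypothetical:

``` sql
-- Create a role, grant privileges to it, then grant the role to a user
CREATE ROLE analyst;
GRANT SELECT ON reports.* TO analyst;

CREATE USER alice IDENTIFIED WITH sha256_password BY 'another-strong-password';
GRANT analyst TO alice;
SET DEFAULT ROLE analyst TO alice;
```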
-
-## Row Policy {#row-policy-management}
-
-Row policy is a filter that defines which rows are available to a user or a role. Row policy contains filters for one particular table, as well as a list of roles and/or users which should use this row policy.
-
-Management queries:
-
-- [CREATE ROW POLICY](../sql-reference/statements/create.md#create-row-policy-statement)
-- [ALTER ROW POLICY](../sql-reference/statements/alter.md#alter-row-policy-statement)
-- [DROP ROW POLICY](../sql-reference/statements/misc.md#drop-row-policy-statement)
-- [SHOW CREATE ROW POLICY](../sql-reference/statements/show.md#show-create-row-policy-statement)
-
-## Settings Profile {#settings-profiles-management}
-
-Settings profile is a collection of [settings](settings/index.md). Settings profile contains settings and constraints, as well as a list of roles and/or users to which this profile is applied.
-
-Management queries:
-
-- [CREATE SETTINGS PROFILE](../sql-reference/statements/create.md#create-settings-profile-statement)
-- [ALTER SETTINGS PROFILE](../sql-reference/statements/alter.md#alter-settings-profile-statement)
-- [DROP SETTINGS PROFILE](../sql-reference/statements/misc.md#drop-settings-profile-statement)
-- [SHOW CREATE SETTINGS PROFILE](../sql-reference/statements/show.md#show-create-settings-profile-statement)
-
-## Quota {#quotas-management}
-
-Quota limits resource usage. See [Quotas](quotas.md).
-
-Quota contains a set of limits for some durations, as well as a list of roles and/or users which should use this quota.
-
-Management queries:
-
-- [CREATE QUOTA](../sql-reference/statements/create.md#create-quota-statement)
-- [ALTER QUOTA](../sql-reference/statements/alter.md#alter-quota-statement)
-- [DROP QUOTA](../sql-reference/statements/misc.md#drop-quota-statement)
-- [SHOW CREATE QUOTA](../sql-reference/statements/show.md#show-create-quota-statement)
-
-## Enabling SQL-driven Access Control and Account Management {#enabling-access-control}
-
-- Setup a directory for configuration storage.
-
-    ClickHouse stores access entity configurations in the folder set in the [access_control_path](server-configuration-parameters/settings.md#access_control_path) server configuration parameter.
-
-- Enable SQL-driven access control and account management for at least one user account.
-
-    By default, SQL-driven access control and account management is disabled for all users. You need to configure at least one user in the `users.xml` configuration file and assign 1 to the [access_management](settings/settings-users.md#access_management-user-setting) setting.
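As a hedged sketch of the SQL-driven workflow for two of the entity types above (all object names, the filter condition, and the memory limit are hypothetical):

``` sql
-- Row policy: the `analyst` role only sees rows for one region
CREATE ROW POLICY region_filter ON reports.sales
    FOR SELECT USING region = 'EU' TO analyst;

-- Settings profile with a fixed, read-only memory limit
CREATE SETTINGS PROFILE restricted
    SETTINGS max_memory_usage = 10000000000 READONLY TO analyst;
```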
-
-[Original article](https://clickhouse.tech/docs/en/operations/access_rights/)
diff --git a/docs/es/operations/backup.md b/docs/es/operations/backup.md
deleted file mode 100644
index be33851574a..00000000000
--- a/docs/es/operations/backup.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-toc_priority: 49
-toc_title: Data Backup
----
-
-# Data Backup {#data-backup}
-
-While [replication](../engines/table-engines/mergetree-family/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes; for example, by default [you can't just drop tables with a MergeTree-like engine containing more than 50 Gb of data](server-configuration-parameters/settings.md#max-table-size-to-drop). However, these safeguards don't cover all possible cases and can be circumvented.
-
-In order to effectively mitigate possible human errors, you should carefully prepare a strategy for backing up and restoring your data **in advance**.
-
-Each company has different resources available and business requirements, so there's no universal solution for ClickHouse backups and restores that will fit every situation. What works for one gigabyte of data likely won't work for tens of petabytes. There are a variety of possible approaches with their own pros and cons, which will be discussed below. It is a good idea to use several approaches instead of just one in order to compensate for their various shortcomings.
-
-!!! note "Note"
-    Keep in mind that if you backed something up and never tried to restore it, chances are that the restore will not work properly when you actually need it (or at least it will take longer than business can tolerate). So whatever backup approach you choose, make sure to automate the restore process as well, and practice it on a spare ClickHouse cluster regularly.
-
-## Duplicating Source Data Somewhere Else {#duplicating-source-data-somewhere-else}
-
-Often data that is ingested into ClickHouse is delivered through some sort of persistent queue, such as [Apache Kafka](https://kafka.apache.org). In this case it is possible to configure an additional set of subscribers that will read the same data stream while it is being written to ClickHouse and store it in cold storage somewhere. Most companies already have some default recommended cold storage, which could be an object store or a distributed filesystem like [HDFS](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html).
-
-## Filesystem Snapshots {#filesystem-snapshots}
-
-Some local filesystems provide snapshot functionality (for example, [ZFS](https://en.wikipedia.org/wiki/ZFS)), but they might not be the best choice for serving live queries. A possible solution is to create additional replicas with this kind of filesystem and exclude them from the [Distributed](../engines/table-engines/special/distributed.md) tables that are used for `SELECT` queries. Snapshots on such replicas will be out of reach of any queries that modify data. As a bonus, these replicas might have special hardware configurations with more disks attached per server, which would be cost-effective.
-
-## clickhouse-copier {#clickhouse-copier}
-
-[clickhouse-copier](utilities/clickhouse-copier.md) is a versatile tool that was initially created to re-shard petabyte-sized tables. It can also be used for backup and restore purposes because it reliably copies data between ClickHouse tables and clusters.
-
-For smaller volumes of data, a simple `INSERT INTO ... SELECT ...` to remote tables might work as well.
-
-## Manipulations with Parts {#manipulations-with-parts}
-
-ClickHouse allows using the `ALTER TABLE ... FREEZE PARTITION ...` query to create a local copy of table partitions. This is implemented using hardlinks to the `/var/lib/clickhouse/shadow/` folder, so it usually does not consume extra disk space for old data. The created copies of files are not handled by the ClickHouse server, so you can just leave them there: you will have a simple backup that does not require any additional external system, but it will still be prone to hardware issues. For this reason, it's better to remotely copy them to another location and then remove the local copies. Distributed filesystems and object stores are still a good option for this, but normal attached file servers with a large enough capacity might work as well (in this case the transfer will occur via the network filesystem or maybe [rsync](https://en.wikipedia.org/wiki/Rsync)).
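A minimal hypothetical example of the freeze step just described (database, table, and partition id are assumptions):

``` sql
-- Create a local hardlink copy of partition 202105 under /var/lib/clickhouse/shadow/
ALTER TABLE db.events FREEZE PARTITION 202105;
```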
-
-For more information about queries related to partition manipulations, see the [ALTER documentation](../sql-reference/statements/alter.md#alter_manipulations-with-partitions).
-
-A third-party tool is available to automate this approach: [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup).
-
-[Original article](https://clickhouse.tech/docs/en/operations/backup/)
diff --git a/docs/es/operations/configuration-files.md b/docs/es/operations/configuration-files.md
deleted file mode 100644
index d9aa8567868..00000000000
--- a/docs/es/operations/configuration-files.md
+++ /dev/null
@@ -1,57 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 50
-toc_title: "Configuration Files"
----
-
-# Configuration Files {#configuration_files}
-
-ClickHouse supports multi-file configuration management. The main server configuration file is `/etc/clickhouse-server/config.xml`. Other files must be in the `/etc/clickhouse-server/config.d` directory.
-
-!!! note "Note"
-    All the configuration files should be in XML format. Also, they should have the same root element, usually `<yandex>`.
-
-Some settings specified in the main configuration file can be overridden in other configuration files.
-The `replace` or `remove` attributes can be specified for the elements of these configuration files.
-
-If neither is specified, it combines the contents of elements recursively, replacing values of duplicate children.
-
-If `replace` is specified, it replaces the entire element with the specified one.
-
-If `remove` is specified, it deletes the element.
-
-The config can also define “substitutions”. If an element has the `incl` attribute, the corresponding substitution from the file will be used as the value. By default, the path to the file with substitutions is `/etc/metrika.xml`. This can be changed in the [include_from](server-configuration-parameters/settings.md#server_configuration_parameters-include_from) element in the server config. The substitution values are specified in `/yandex/substitution_name` elements in this file. If a substitution specified in `incl` does not exist, it is recorded in the log. To prevent ClickHouse from logging missing substitutions, specify the `optional="true"` attribute (for example, settings for [macros](server-configuration-parameters/settings.md)).
-
-Substitutions can also be performed from ZooKeeper. To do this, specify the attribute `from_zk = "/path/to/node"`. The element value is replaced with the contents of the node at `/path/to/node` in ZooKeeper. You can also put an entire XML subtree on the ZooKeeper node, and it will be fully inserted into the source element.
-
-The `config.xml` file can specify a separate config with user settings, profiles, and quotas. The relative path to this config is set in the `users_config` element. By default, it is `users.xml`. If `users_config` is omitted, the user settings, profiles, and quotas are specified directly in `config.xml`.
-
-Users configuration can be split into separate files similar to `config.xml` and `config.d/`.
-The directory name is defined as the `users_config` setting without the `.xml` postfix, concatenated with `.d`.
-The `users.d` directory is used by default, as `users_config` defaults to `users.xml`.
-For example, you can have a separate config file for each user like this:
-
-``` bash
-$ cat /etc/clickhouse-server/users.d/alice.xml
-```
-
-``` xml
-<yandex>
-    <users>
-        <alice>
-            <profile>analytics</profile>
-            <networks>
-                <ip>::/0</ip>
-            </networks>
-            <password_sha256_hex>...</password_sha256_hex>
-            <quota>analytics</quota>
-        </alice>
-    </users>
-</yandex>
-```
-
-For each config file, the server also generates `file-preprocessed.xml` files when starting. These files contain all the completed substitutions and overrides, and they are intended for informational use. If ZooKeeper substitutions were used in the config files but ZooKeeper is not available on the server start, the server loads the configuration from the preprocessed file.
-
-The server tracks changes in config files, as well as files and ZooKeeper nodes that were used when performing substitutions and overrides, and reloads the settings for users and clusters on the fly. This means that you can modify the cluster, users, and their settings without restarting the server.
-
-[Original article](https://clickhouse.tech/docs/en/operations/configuration_files/)
diff --git a/docs/es/operations/index.md b/docs/es/operations/index.md
deleted file mode 100644
index 9a928fa0f01..00000000000
--- a/docs/es/operations/index.md
+++ /dev/null
@@ -1,28 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_folder_title: "Operations"
-toc_priority: 41
-toc_title: "Introduction"
----
-
-# Operations {#operations}
-
-The ClickHouse operations manual consists of the following major sections:
-
-- [Requirements](requirements.md)
-- [Monitoring](monitoring.md)
-- [Troubleshooting](troubleshooting.md)
-- [Usage Recommendations](tips.md)
-- [Update Procedure](update.md)
-- [Access Rights](access-rights.md)
-- [Data Backup](backup.md)
-- [Configuration Files](configuration-files.md)
-- [Quotas](quotas.md)
-- [System Tables](system-tables.md)
-- [Server Configuration Parameters](server-configuration-parameters/index.md)
-- [How To Test Your Hardware With ClickHouse](performance-test.md)
-- [Settings](settings/index.md)
-- [Utilities](utilities/index.md)
-
-{## [Original article](https://clickhouse.tech/docs/en/operations/) ##}
diff --git a/docs/es/operations/monitoring.md b/docs/es/operations/monitoring.md
deleted file mode 100644
index 19912d23f3b..00000000000
--- a/docs/es/operations/monitoring.md
+++ /dev/null
@@ -1,46 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 45
-toc_title: Monitoring
----
-
-# Monitoring {#monitoring}
-
-You can monitor:
-
-- Utilization of hardware resources.
-- ClickHouse server metrics.
-
-## Resource Utilization {#resource-utilization}
-
-ClickHouse does not monitor the state of hardware resources by itself.
-
-It is highly recommended to set up monitoring for:
-
-- Load and temperature on processors.
-
-    You can use [dmesg](https://en.wikipedia.org/wiki/Dmesg), [turbostat](https://www.linux.org/docs/man8/turbostat.html) or other instruments.
-
-- Utilization of the storage system, RAM, and network.
-
-## ClickHouse Server Metrics {#clickhouse-server-metrics}
-
-The ClickHouse server has embedded instruments for self-state monitoring.
-
-To track server events, use server logs. See the [logger](server-configuration-parameters/settings.md#server_configuration_parameters-logger) section of the configuration file.
-
-ClickHouse collects:
-
-- Different metrics of how the server uses computational resources.
-- Common statistics on query processing.
-
-You can find metrics in the [system.metrics](../operations/system-tables.md#system_tables-metrics), [system.events](../operations/system-tables.md#system_tables-events), and [system.asynchronous_metrics](../operations/system-tables.md#system_tables-asynchronous_metrics) tables.
-
-You can configure ClickHouse to export metrics to [Graphite](https://github.com/graphite-project). See the [Graphite section](server-configuration-parameters/settings.md#server_configuration_parameters-graphite) in the ClickHouse server configuration file. Before configuring export of metrics, you should set up Graphite by following their official [guide](https://graphite.readthedocs.io/en/latest/install.html).
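A quick, hedged way to inspect these tables interactively; the filter expression is illustrative only:

``` sql
-- Current metric values, e.g. anything connection-related
SELECT metric, value FROM system.metrics WHERE metric LIKE '%Connection%';

-- Cumulative event counters since server start
SELECT event, value FROM system.events ORDER BY value DESC LIMIT 10;
```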
-
-You can configure ClickHouse to export metrics to [Prometheus](https://prometheus.io). See the [Prometheus section](server-configuration-parameters/settings.md#server_configuration_parameters-prometheus) in the ClickHouse server configuration file. Before configuring export of metrics, you should set up Prometheus by following their official [guide](https://prometheus.io/docs/prometheus/latest/installation/).
-
-Additionally, you can monitor server availability through the HTTP API. Send the `HTTP GET` request to `/ping`. If the server is available, it responds with `200 OK`.
-
-To monitor servers in a cluster configuration, you should set the [max_replica_delay_for_distributed_queries](settings/settings.md#settings-max_replica_delay_for_distributed_queries) parameter and use the HTTP resource `/replicas_status`. A request to `/replicas_status` returns `200 OK` if the replica is available and is not delayed behind the other replicas. If a replica is delayed, it returns `503 HTTP_SERVICE_UNAVAILABLE` with information about the gap.
diff --git a/docs/es/operations/optimizing-performance/index.md b/docs/es/operations/optimizing-performance/index.md
deleted file mode 100644
index d2796c6e0d3..00000000000
--- a/docs/es/operations/optimizing-performance/index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_folder_title: "Optimizing Performance"
-toc_priority: 52
----
-
-
diff --git a/docs/es/operations/optimizing-performance/sampling-query-profiler.md b/docs/es/operations/optimizing-performance/sampling-query-profiler.md
deleted file mode 100644
index a474dde6af2..00000000000
--- a/docs/es/operations/optimizing-performance/sampling-query-profiler.md
+++ /dev/null
@@ -1,64 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 54
-toc_title: "Query Profiling"
----
-
-# Sampling Query Profiler {#sampling-query-profiler}
-
-ClickHouse runs a sampling profiler that allows analyzing query execution. Using the profiler you can find source code routines that were used most frequently during query execution. You can trace CPU time and wall-clock time spent, including idle time.
-
-To use the profiler:
-
-- Setup the [trace_log](../server-configuration-parameters/settings.md#server_configuration_parameters-trace_log) section of the server configuration.
-
-    This section configures the [trace_log](../../operations/system-tables.md#system_tables-trace_log) system table containing the results of the profiler functioning. It is configured by default. Remember that data in this table is valid only for a running server. After the server restart, ClickHouse does not clean up the table and all the stored virtual memory addresses may become invalid.
-
-- Setup the [query_profiler_cpu_time_period_ns](../settings/settings.md#query_profiler_cpu_time_period_ns) or [query_profiler_real_time_period_ns](../settings/settings.md#query_profiler_real_time_period_ns) settings. Both settings can be used simultaneously.
-
-    These settings allow you to configure profiler timers. As these are the session settings, you can get different sampling frequencies for the whole server, individual users or user profiles, for your interactive session, and for each individual query.
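For instance, the timers might be set for a single session along these lines (the exact period values are arbitrary):

``` sql
-- Sample every 10 ms of real time for queries in this session
SET query_profiler_real_time_period_ns = 10000000;
-- Required later for reading the results with introspection functions
SET allow_introspection_functions = 1;
```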
-
-The default sampling frequency is one sample per second and both CPU and real timers are enabled. This frequency allows collecting enough information about a ClickHouse cluster. At the same time, working with this frequency, the profiler does not affect ClickHouse server performance. If you need to profile each individual query, try to use a higher sampling frequency.
-
-To analyze the `trace_log` system table:
-
-- Install the `clickhouse-common-static-dbg` package. See [Install from DEB Packages](../../getting-started/install.md#install-from-deb-packages).
-
-- Allow introspection functions by the [allow_introspection_functions](../settings/settings.md#settings-allow_introspection_functions) setting.
-
-    For security reasons, introspection functions are disabled by default.
-
-- Use the `addressToLine`, `addressToSymbol` and `demangle` [introspection functions](../../sql-reference/functions/introspection.md) to get function names and their positions in ClickHouse code. To get a profile for some query, you need to aggregate data from the `trace_log` table. You can aggregate data by individual functions or by whole stack traces.
-
-If you need to visualize `trace_log` info, try [flamegraph](../../interfaces/third-party/gui/#clickhouse-flamegraph) and [speedscope](https://github.com/laplab/clickhouse-speedscope).
-
-## Example {#example}
-
-In this example we:
-
-- Filter `trace_log` data by a query identifier and the current date.
-
-- Aggregate by stack trace.
-
-- Using introspection functions, we will get a report of:
-
-    - Names of symbols and corresponding source code functions.
-    - Source code locations of these functions.
-
-
-
-``` sql
-SELECT
-    count(),
-    arrayStringConcat(arrayMap(x -> concat(demangle(addressToSymbol(x)), '\n    ', addressToLine(x)), trace), '\n') AS sym
-FROM system.trace_log
-WHERE (query_id = 'ebca3574-ad0a-400a-9cbc-dca382f5998c') AND (event_date = today())
-GROUP BY trace
-ORDER BY count() DESC
-LIMIT 10
-```
-
-``` text
-{% include "examples/sampling_query_profiler_result.txt" %}
-```
diff --git a/docs/es/operations/performance-test.md b/docs/es/operations/performance-test.md
deleted file mode 100644
index 97444f339cd..00000000000
--- a/docs/es/operations/performance-test.md
+++ /dev/null
@@ -1,82 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 54
-toc_title: Testing Hardware
----
-
-# How to Test Your Hardware with ClickHouse {#how-to-test-your-hardware-with-clickhouse}
-
-With this instruction you can run a basic ClickHouse performance test on any server without installing ClickHouse packages.
-
-1. Go to the “commits” page: https://github.com/ClickHouse/ClickHouse/commits/master
-
-2. Click on the first green check mark or red cross with green “ClickHouse Build Check” and click on the “Details” link near “ClickHouse Build Check”. There is no such link in some commits, for example commits with documentation. In this case, choose the nearest commit having this link.
-
3. Copy the link to the “clickhouse” binary for amd64 or aarch64.

4. ssh to the server and download it with wget:

        # For amd64:
        wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578163263_binary/clickhouse
        # For aarch64:
        wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578161264_binary/clickhouse
        # Then do:
        chmod a+x clickhouse

5. Download configs:

        wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.xml
        wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/users.xml
        mkdir config.d
        wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml
        wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml

6. Download benchmark files:

        wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh
        chmod a+x benchmark-new.sh
        wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql

7. Download the test data according to the [Yandex.Metrica dataset](../getting-started/example-datasets/metrica.md) instruction (the “hits” table containing 100 million rows).

        wget https://datasets.clickhouse.tech/hits/partitions/hits_100m_obfuscated_v1.tar.xz
        tar xvf hits_100m_obfuscated_v1.tar.xz -C .
        mv hits_100m_obfuscated_v1/* .

8. Run the server:

        ./clickhouse server

9. Check the data: ssh to the server in another terminal

        ./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated"
        100000000

10. Edit benchmark-new.sh, change `clickhouse-client` to `./clickhouse client` and add the `--max_memory_usage 100000000000` parameter.

        mcedit benchmark-new.sh

11. Run the benchmark:

        ./benchmark-new.sh hits_100m_obfuscated

12. Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com

All the results are published here: https://clickhouse.tech/benchmark/hardware/

diff --git a/docs/es/operations/quotas.md b/docs/es/operations/quotas.md
deleted file mode 100644
index 9d84ce21339..00000000000
--- a/docs/es/operations/quotas.md
+++ /dev/null
@@ -1,112 +0,0 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 51
toc_title: "Quotas"
---

# Quotas {#quotas}

Quotas allow you to limit resource usage over a period of time or track the use of resources.
Quotas are set up in the user config, which is usually ‘users.xml’.

The system also has a feature for limiting the complexity of a single query. See the section “Restrictions on query complexity”.

In contrast to query complexity restrictions, quotas:

- Place restrictions on a set of queries that can be run over a period of time, instead of limiting a single query.
- Account for resources spent on all remote servers for distributed query processing.

Let's look at the section of the ‘users.xml’ file that defines quotas.
``` xml
<!-- Quotas -->
<quotas>
    <!-- Quota name. -->
    <default>
        <!-- Restrictions for a time period. You can set many intervals with different restrictions. -->
        <interval>
            <!-- Length of the interval. -->
            <duration>3600</duration>

            <!-- Unlimited. Just collect data for the specified time interval. -->
            <queries>0</queries>
            <errors>0</errors>
            <result_rows>0</result_rows>
            <read_rows>0</read_rows>
            <execution_time>0</execution_time>
        </interval>
    </default>
</quotas>
```

By default, the quota tracks resource consumption for each hour, without limiting usage.
The resource consumption calculated for each interval is output to the server log after each request.

``` xml
<statbox>
    <interval>
        <duration>3600</duration>
        <queries>1000</queries>
        <errors>100</errors>
        <result_rows>1000000000</result_rows>
        <read_rows>100000000000</read_rows>
        <execution_time>900</execution_time>
    </interval>
    <interval>
        <duration>86400</duration>
        <queries>10000</queries>
        <errors>1000</errors>
        <result_rows>5000000000</result_rows>
        <read_rows>500000000000</read_rows>
        <execution_time>7200</execution_time>
    </interval>
</statbox>
```

For the ‘statbox’ quota, restrictions are set for every hour and for every 24 hours (86,400 seconds). The time interval is counted starting from an implementation-defined fixed moment in time. In other words, the 24-hour interval doesn't necessarily begin at midnight.

When the interval ends, all collected values are cleared. For the next hour, the quota calculation starts over.

Here are the amounts that can be restricted:

`queries` – The total number of requests.

`errors` – The number of queries that threw an exception.

`result_rows` – The total number of rows given as a result.

`read_rows` – The total number of source rows read from tables for running the query on all remote servers.

`execution_time` – The total query execution time, in seconds (wall time).

If the limit is exceeded for at least one time interval, an exception is thrown with a text about which restriction was exceeded, for which interval, and when the new interval begins (when queries can be sent again).

Quotas can use the “quota key” feature in order to report on resources for multiple keys independently. Here is an example of this:

``` xml
<!-- For the global reports designer. -->
<web_global>
    <!-- keyed – The quota_key "key" is passed in the query parameter,
            and the quota is tracked separately for each key value. -->
    <keyed />
</web_global>
```

The quota is assigned to users in the ‘users’ section of the config. See the section “Access rights”.

For distributed query processing, the accumulated amounts are stored on the requestor server. So if the user goes to another server, the quota there will “start over”.

When the server is restarted, quotas are reset.

[Original article](https://clickhouse.tech/docs/en/operations/quotas/)

diff --git a/docs/es/operations/requirements.md b/docs/es/operations/requirements.md
deleted file mode 100644
index d6f0f25cf21..00000000000
--- a/docs/es/operations/requirements.md
+++ /dev/null
@@ -1,61 +0,0 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 44
toc_title: "Requirements"
---

# Requirements {#requirements}

## CPU {#cpu}

For installation from prebuilt deb packages, use a CPU with x86_64 architecture and support for SSE 4.2 instructions. To run ClickHouse with processors that do not support SSE 4.2 or that have AArch64 or PowerPC64LE architecture, you should build ClickHouse from sources.

ClickHouse implements parallel data processing and uses all the hardware resources available. When choosing a processor, take into account that ClickHouse works more efficiently at configurations with a large number of cores but a lower clock rate than at configurations with fewer cores and a higher clock rate. For example, 16 cores with 2600 MHz is preferable to 8 cores with 3600 MHz.

Use of **Turbo Boost** and **hyper-threading** technologies is recommended. It significantly improves performance with a typical workload.
## RAM {#ram}

We recommend using a minimum of 4 GB of RAM to perform non-trivial queries. The ClickHouse server can run with a much smaller amount of RAM, but it requires memory for processing queries.

The required volume of RAM depends on:

- The complexity of queries.
- The amount of data that is processed in queries.

To calculate the required volume of RAM, you should estimate the size of temporary data for [GROUP BY](../sql-reference/statements/select/group-by.md#select-group-by-clause), [DISTINCT](../sql-reference/statements/select/distinct.md#select-distinct), [JOIN](../sql-reference/statements/select/join.md#select-join) and other operations you use.

ClickHouse can use external memory for temporary data. See [GROUP BY in external memory](../sql-reference/statements/select/group-by.md#select-group-by-in-external-memory) for details.

## Swap File {#swap-file}

Disable the swap file for production environments.

## Storage Subsystem {#storage-subsystem}

You need to have 2 GB of free disk space to install ClickHouse.

The volume of storage required for your data should be calculated separately. The assessment should include:

- Estimation of the data volume.

    You can take a sample of the data and get the average size of a row from it. Then multiply the value by the number of rows you plan to store.

- The data compression coefficient.

    To estimate the data compression coefficient, load a sample of your data into ClickHouse, and compare the actual size of the data with the size of the table stored. For example, clickstream data is usually compressed by 6–10 times.

To calculate the final volume of data to be stored, apply the compression coefficient to the estimated data volume. If you plan to store data in several replicas, then multiply the estimated volume by the number of replicas.

## Network {#network}

If possible, use networks of 10G or higher class.

The network bandwidth is critical for processing distributed queries with a large amount of intermediate data. Besides, network speed affects replication processes.

## Software {#software}

ClickHouse is developed primarily for the Linux family of operating systems. The recommended Linux distribution is Ubuntu. The `tzdata` package should be installed in the system.

ClickHouse can also work in other operating system families. See details in the [Getting started](../getting-started/index.md) section of the documentation.

diff --git a/docs/es/operations/server-configuration-parameters/index.md b/docs/es/operations/server-configuration-parameters/index.md
deleted file mode 100644
index e1e2e777b94..00000000000
--- a/docs/es/operations/server-configuration-parameters/index.md
+++ /dev/null
@@ -1,19 +0,0 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_folder_title: "Server configuration parameters"
toc_priority: 54
toc_title: "Introduction"
---

# Server Configuration Parameters {#server-settings}

This section contains descriptions of server settings that cannot be changed at the session or query level.

These settings are stored in the `config.xml` file on the ClickHouse server.
Other settings are described in the “[Settings](../settings/index.md#session-settings-intro)” section.

Before studying the settings, read the [Configuration files](../configuration-files.md#configuration_files) section and note the use of substitutions (the `incl` and `optional` attributes).

[Original article](https://clickhouse.tech/docs/en/operations/server_configuration_parameters/)

diff --git a/docs/es/operations/server-configuration-parameters/settings.md b/docs/es/operations/server-configuration-parameters/settings.md
deleted file mode 100644
index 86264ed0440..00000000000
--- a/docs/es/operations/server-configuration-parameters/settings.md
+++ /dev/null
@@ -1,906 +0,0 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 57
toc_title: "Server settings"
---

# Server Settings {#server-settings}

## builtin_dictionaries_reload_interval {#builtin-dictionaries-reload-interval}

The interval in seconds before reloading built-in dictionaries.

ClickHouse reloads built-in dictionaries every x seconds. This makes it possible to edit dictionaries “on the fly” without restarting the server.

Default value: 3600.

**Example**

``` xml
<builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>
```

## compression {#server-settings-compression}

Data compression settings for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md)-engine tables.

!!! warning "Warning"
    Don't use it if you have just started using ClickHouse.

Configuration template:

``` xml
<compression>
    <case>
      <min_part_size>...</min_part_size>
      <min_part_size_ratio>...</min_part_size_ratio>
      <method>...</method>
    </case>
    ...
</compression>
```

`<case>` fields:

- `min_part_size` – The minimum size of a data part.
- `min_part_size_ratio` – The ratio of the data part size to the table size.
- `method` – Compression method. Acceptable values: `lz4` or `zstd`.

You can configure multiple `<case>` sections.

Actions when conditions are met:

- If a data part matches a condition set, ClickHouse uses the specified compression method.
- If a data part matches multiple condition sets, ClickHouse uses the first matched condition set.

If no conditions are met for a data part, ClickHouse uses the `lz4` compression.

**Example**

``` xml
<compression incl="clickhouse_compression">
    <case>
        <min_part_size>10000000000</min_part_size>
        <min_part_size_ratio>0.01</min_part_size_ratio>
        <method>zstd</method>
    </case>
</compression>
```

## default_database {#default-database}

The default database.

To get a list of databases, use the [SHOW DATABASES](../../sql-reference/statements/show.md#show-databases) query.

**Example**

``` xml
<default_database>default</default_database>
```

## default_profile {#default-profile}

Default settings profile.

Settings profiles are located in the file specified in the parameter `user_config`.

**Example**

``` xml
<default_profile>default</default_profile>
```

## dictionaries_config {#server_configuration_parameters-dictionaries_config}

The path to the config file for external dictionaries.

Path:

- Specify the absolute path or the path relative to the server config file.
- The path can contain wildcards \* and ?.

See also “[External dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md)”.

**Example**

``` xml
<dictionaries_config>*_dictionary.xml</dictionaries_config>
```
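To check which dictionaries the server actually picked up from these files, one option (a sketch, assuming at least one external dictionary is configured) is to query the `system.dictionaries` table:

``` sql
-- Each row shows a dictionary, the config file it came from, and its load status.
SELECT name, origin, status, last_exception
FROM system.dictionaries;
```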
## dictionaries_lazy_load {#server_configuration_parameters-dictionaries_lazy_load}

Lazy loading of dictionaries.

If `true`, then each dictionary is created on first use. If dictionary creation failed, the function that was using the dictionary throws an exception.

If `false`, all dictionaries are created when the server starts, and if there is an error, the server shuts down.

The default is `true`.

**Example**

``` xml
<dictionaries_lazy_load>true</dictionaries_lazy_load>
```

## format_schema_path {#server_configuration_parameters-format_schema_path}

The path to the directory with the schemes for the input data, such as schemas for the [CapnProto](../../interfaces/formats.md#capnproto) format.

**Example**

``` xml
<!-- Directory containing schema files for various input formats. -->
<format_schema_path>format_schemas/</format_schema_path>
```

## graphite {#server_configuration_parameters-graphite}

Sending data to [Graphite](https://github.com/graphite-project).

Settings:

- host – The Graphite server.
- port – The port on the Graphite server.
- interval – The interval for sending, in seconds.
- timeout – The timeout for sending data, in seconds.
- root_path – Prefix for keys.
- metrics – Sending data from the [system.metrics](../../operations/system-tables.md#system_tables-metrics) table.
- events – Sending deltas data accumulated for the time period from the [system.events](../../operations/system-tables.md#system_tables-events) table.
- events_cumulative – Sending cumulative data from the [system.events](../../operations/system-tables.md#system_tables-events) table.
- asynchronous_metrics – Sending data from the [system.asynchronous_metrics](../../operations/system-tables.md#system_tables-asynchronous_metrics) table.

You can configure multiple `<graphite>` clauses. For instance, you can use this for sending different data at different intervals.

**Example**

``` xml
<graphite>
    <host>localhost</host>
    <port>42000</port>
    <timeout>0.1</timeout>
    <interval>60</interval>
    <root_path>one_min</root_path>
    <metrics>true</metrics>
    <events>true</events>
    <events_cumulative>false</events_cumulative>
    <asynchronous_metrics>true</asynchronous_metrics>
</graphite>
```

## graphite_rollup {#server_configuration_parameters-graphite-rollup}

Settings for thinning data for Graphite.

For more details, see [GraphiteMergeTree](../../engines/table-engines/mergetree-family/graphitemergetree.md).

**Example**

``` xml
<graphite_rollup_example>
    <default>
        <function>max</function>
        <retention>
            <age>0</age>
            <precision>60</precision>
        </retention>
        <retention>
            <age>3600</age>
            <precision>300</precision>
        </retention>
        <retention>
            <age>86400</age>
            <precision>3600</precision>
        </retention>
    </default>
</graphite_rollup_example>
```

## http_port/https_port {#http-porthttps-port}

The port for connecting to the server over HTTP(s).

If `https_port` is specified, [openSSL](#server_configuration_parameters-openssl) must be configured.

If `http_port` is specified, the OpenSSL configuration is ignored even if it is set.

**Example**

``` xml
<https_port>9999</https_port>
```

## http_server_default_response {#server_configuration_parameters-http_server_default_response}

The page that is shown by default when you access the ClickHouse HTTP(s) server.
The default value is “Ok.” (with a line feed at the end)

**Example**

Opens `https://tabix.io/` when accessing `http://localhost: http_port`.

``` xml
<http_server_default_response>
  <![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>
]]>
</http_server_default_response>
```

## include_from {#server_configuration_parameters-include_from}

The path to the file with substitutions.

For more information, see the section “[Configuration files](../configuration-files.md#configuration_files)”.

**Example**

``` xml
<include_from>/etc/metrica.xml</include_from>
```

## interserver_http_port {#interserver-http-port}

Port for exchanging data between ClickHouse servers.

**Example**

``` xml
<interserver_http_port>9009</interserver_http_port>
```

## interserver_http_host {#interserver-http-host}

The hostname that can be used by other servers to access this server.

If omitted, it is defined in the same way as the `hostname-f` command.

Useful for breaking away from a specific network interface.

**Example**

``` xml
<interserver_http_host>example.yandex.ru</interserver_http_host>
```

## interserver_http_credentials {#server-settings-interserver-http-credentials}

The username and password used for [replication](../../engines/table-engines/mergetree-family/replication.md) with the Replicated\* engines. These credentials are used only for communication between replicas and are unrelated to credentials for ClickHouse clients. The server checks these credentials for connecting replicas and uses the same credentials when connecting to other replicas. So, these credentials should be set the same for all replicas in a cluster.
By default, the authentication is not used.

This section contains the following parameters:

- `user` — username.
- `password` — password.

**Example**

``` xml
<interserver_http_credentials>
    <user>admin</user>
    <password>222</password>
</interserver_http_credentials>
```

## keep_alive_timeout {#keep-alive-timeout}

The number of seconds that ClickHouse waits for incoming requests before closing the connection. Defaults to 3 seconds.

**Example**

``` xml
<keep_alive_timeout>3</keep_alive_timeout>
```

## listen_host {#server_configuration_parameters-listen_host}

Restriction on hosts that requests can come from. If you want the server to answer all of them, specify `::`.

Examples:

``` xml
<listen_host>::1</listen_host>
<listen_host>127.0.0.1</listen_host>
```

## logger {#server_configuration_parameters-logger}

Logging settings.

Keys:

- level – Logging level. Acceptable values: `trace`, `debug`, `information`, `warning`, `error`.
- log – The log file. Contains all the entries according to `level`.
- errorlog – Error log file.
- size – Size of the file. Applies to `log` and `errorlog`. Once the file reaches `size`, ClickHouse archives and renames it, and creates a new log file in its place.
- count – The number of archived log files that ClickHouse stores.

**Example**

``` xml
<logger>
    <level>trace</level>
    <log>/var/log/clickhouse-server/clickhouse-server.log</log>
    <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
    <size>1000M</size>
    <count>10</count>
</logger>
```

Writing to the syslog is also supported. Config example:

``` xml
<logger>
    <use_syslog>1</use_syslog>
    <syslog>
        <address>syslog.remote:10514</address>
        <hostname>myhost.local</hostname>
        <facility>LOG_LOCAL6</facility>
        <format>syslog</format>
    </syslog>
</logger>
```

Keys:

- use_syslog — Required setting if you want to write to the syslog.
- address — The host\[:port\] of syslogd. If omitted, the local daemon is used.
- hostname — Optional. The name of the host that logs are sent from.
- facility — [The syslog facility keyword](https://en.wikipedia.org/wiki/Syslog#Facility) in uppercase letters with the “LOG_” prefix: (`LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, and so on).
    Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise.
- format – Message format. Possible values: `bsd` and `syslog`.

## macros {#macros}

Parameter substitutions for replicated tables.

Can be omitted if replicated tables are not used.

For more information, see the section “[Creating replicated tables](../../engines/table-engines/mergetree-family/replication.md)”.

**Example**

``` xml
<macros incl="macros" optional="true" />
```

## mark_cache_size {#server-mark-cache-size}

Approximate size (in bytes) of the cache of marks used by table engines of the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family.

The cache is shared for the server, and memory is allocated as needed. The cache size must be at least 5368709120.

**Example**

``` xml
<mark_cache_size>5368709120</mark_cache_size>
```

## max_concurrent_queries {#max-concurrent-queries}

The maximum number of simultaneously processed requests.

**Example**

``` xml
<max_concurrent_queries>100</max_concurrent_queries>
```

## max_connections {#max-connections}

The maximum number of inbound connections.

**Example**

``` xml
<max_connections>4096</max_connections>
```

## max_open_files {#max-open-files}

The maximum number of open files.

Default: `maximum`.

We recommend using this option in Mac OS X, since the `getrlimit()` function returns an incorrect value.

**Example**

``` xml
<max_open_files>262144</max_open_files>
```

## max_table_size_to_drop {#max-table-size-to-drop}

Restriction on deleting tables.

If the size of a [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table exceeds `max_table_size_to_drop` (in bytes), you can't delete it using a DROP query.

If you still need to delete the table without restarting the ClickHouse server, create the `/flags/force_drop_table` file and run the DROP query.

Default value: 50 GB.

The value 0 means that you can delete all tables without any restrictions.

**Example**

``` xml
<max_table_size_to_drop>0</max_table_size_to_drop>
```

## merge_tree {#server_configuration_parameters-merge_tree}

Fine tuning for tables in the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family.

For more information, see the MergeTreeSettings.h header file.

**Example**

``` xml
<merge_tree>
    <max_suspicious_broken_parts>5</max_suspicious_broken_parts>
</merge_tree>
```

## openSSL {#server_configuration_parameters-openssl}

SSL client/server configuration.

Support for SSL is provided by the `libpoco` library. The interface is described in the file [SSLManager.h](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h)

Keys for server/client settings:

- privateKeyFile – The path to the file with the secret key of the PEM certificate. The file may contain a key and certificate at the same time.
- certificateFile – The path to the client/server certificate file in PEM format. You can omit it if `privateKeyFile` contains the certificate.
- caConfig – The path to the file or directory that contains trusted root certificates.
- verificationMode – The method for checking the node's certificates. Details are in the description of the [Context](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/Context.h) class. Possible values: `none`, `relaxed`, `strict`, `once`.
- verificationDepth – The maximum length of the verification chain. Verification will fail if the certificate chain length exceeds the set value.
- loadDefaultCAFile – Indicates that built-in CA certificates for OpenSSL will be used. Acceptable values: `true`, `false`.
- cipherList – Supported OpenSSL encryptions. For example: `ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH`.
- cacheSessions – Enables or disables caching sessions. Must be used in combination with `sessionIdContext`. Acceptable values: `true`, `false`.
- sessionIdContext – A unique set of random characters that the server appends to each generated identifier. The length of the string must not exceed `SSL_MAX_SSL_SESSION_ID_LENGTH`. This parameter is always recommended since it helps avoid problems both if the server caches the session and if the client requested caching. Default value: `${application.name}`.
- sessionCacheSize – The maximum number of sessions that the server caches. Default value: 1024\*20. 0 – Unlimited sessions.
- sessionTimeout – Time for caching the session on the server.
- extendedVerification – Automatically extended verification of certificates after the session ends. Acceptable values: `true`, `false`.
- requireTLSv1 – Require a TLSv1 connection. Acceptable values: `true`, `false`.
- requireTLSv1_1 – Require a TLSv1.1 connection. Acceptable values: `true`, `false`.
- requireTLSv1_2 – Require a TLSv1.2 connection. Acceptable values: `true`, `false`.
- fips – Activates OpenSSL FIPS mode. Supported if the library's OpenSSL version supports FIPS.
- privateKeyPassphraseHandler – Class (PrivateKeyPassphraseHandler subclass) that requests the passphrase for accessing the private key. For example: `<privateKeyPassphraseHandler>`, `<name>KeyFileHandler</name>`, `<options><password>test</password></options>`, `</privateKeyPassphraseHandler>`.
- invalidCertificateHandler – Class (a subclass of CertificateHandler) for verifying invalid certificates. For example: `<invalidCertificateHandler> <name>ConsoleCertificateHandler</name> </invalidCertificateHandler>`.
- disableProtocols – Protocols that are not allowed to use.
- preferServerCiphers – Preferred server ciphers on the client.

**Example of settings:**

``` xml
<openSSL>
    <server>
        <certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
        <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
        <dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile>
        <verificationMode>none</verificationMode>
        <loadDefaultCAFile>true</loadDefaultCAFile>
        <cacheSessions>true</cacheSessions>
        <disableProtocols>sslv2,sslv3</disableProtocols>
        <preferServerCiphers>true</preferServerCiphers>
    </server>
    <client>
        <loadDefaultCAFile>true</loadDefaultCAFile>
        <cacheSessions>true</cacheSessions>
        <disableProtocols>sslv2,sslv3</disableProtocols>
        <preferServerCiphers>true</preferServerCiphers>
        <invalidCertificateHandler>
            <name>RejectCertificateHandler</name>
        </invalidCertificateHandler>
    </client>
</openSSL>
```

## part_log {#server_configuration_parameters-part-log}

Logging events that are associated with [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md). For instance, adding or merging data. You can use the log to simulate merge algorithms and compare their characteristics. You can visualize the merge process.

Queries are logged in the [system.part_log](../../operations/system-tables.md#system_tables-part-log) table, not in a separate file. You can configure the name of this table in the `table` parameter (see below).

Use the following parameters to configure logging:

- `database` – Name of the database.
- `table` – Name of the system table.
- `partition_by` – Sets a [custom partitioning key](../../engines/table-engines/mergetree-family/custom-partitioning-key.md).
- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.

**Example**

``` xml
<part_log>
    <database>system</database>
    <table>part_log</table>
    <partition_by>toMonday(event_date)</partition_by>
    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
</part_log>
```

## path {#server_configuration_parameters-path}

The path to the directory containing data.

!!! note "Note"
    The trailing slash is mandatory.

**Example**

``` xml
<path>/var/lib/clickhouse/</path>
```

## prometheus {#server_configuration_parameters-prometheus}

Exposing metrics data for scraping from [Prometheus](https://prometheus.io).

Settings:

- `endpoint` – HTTP endpoint for scraping metrics by prometheus server. Start from ‘/’.
- `port` – Port for `endpoint`.
- `metrics` – Flag that sets to expose metrics from the [system.metrics](../system-tables.md#system_tables-metrics) table.
- `events` – Flag that sets to expose metrics from the [system.events](../system-tables.md#system_tables-events) table.
- `asynchronous_metrics` – Flag that sets to expose current metrics values from the [system.asynchronous_metrics](../system-tables.md#system_tables-asynchronous_metrics) table.

**Example**

``` xml
<prometheus>
    <endpoint>/metrics</endpoint>
    <port>8001</port>
    <metrics>true</metrics>
    <events>true</events>
    <asynchronous_metrics>true</asynchronous_metrics>
</prometheus>
```

## query_log {#server_configuration_parameters-query-log}

Setting for logging queries received with the [log_queries=1](../settings/settings.md) setting.

Queries are logged in the [system.query_log](../../operations/system-tables.md#system_tables-query_log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below).

Use the following parameters to configure logging:

- `database` – Name of the database.
- `table` – Name of the system table the queries will be logged in.
- `partition_by` – Sets a [custom partitioning key](../../engines/table-engines/mergetree-family/custom-partitioning-key.md) for a table.
- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.

If the table doesn't exist, ClickHouse will create it. If the structure of the query log changed when the ClickHouse server was updated, the table with the old structure is renamed, and a new table is created automatically.

**Example**

``` xml
<query_log>
    <database>system</database>
    <table>query_log</table>
    <partition_by>toMonday(event_date)</partition_by>
    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
</query_log>
```

## query_thread_log {#server_configuration_parameters-query-thread-log}

Setting for logging threads of queries received with the [log_query_threads=1](../settings/settings.md#settings-log-query-threads) setting.

Queries are logged in the [system.query_thread_log](../../operations/system-tables.md#system_tables-query-thread-log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below).

Use the following parameters to configure logging:

- `database` – Name of the database.
- `table` – Name of the system table the queries will be logged in.
- `partition_by` – Sets a [custom partitioning key](../../engines/table-engines/mergetree-family/custom-partitioning-key.md) for a system table.
- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.

If the table doesn't exist, ClickHouse will create it. If the structure of the query thread log changed when the ClickHouse server was updated, the table with the old structure is renamed, and a new table is created automatically.

**Example**

``` xml
<query_thread_log>
    <database>system</database>
    <table>query_thread_log</table>
    <partition_by>toMonday(event_date)</partition_by>
    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
</query_thread_log>
```

## trace_log {#server_configuration_parameters-trace_log}

Settings for the [trace_log](../../operations/system-tables.md#system_tables-trace_log) system table operation.

Parameters:

- `database` — Database for storing a table.
- `table` — Table name.
- `partition_by` — [Custom partitioning key](../../engines/table-engines/mergetree-family/custom-partitioning-key.md) for a system table.
- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table.

The default server configuration file `config.xml` contains the following settings section:

``` xml
<trace_log>
    <database>system</database>
    <table>trace_log</table>
    <partition_by>toYYYYMM(event_date)</partition_by>
    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
</trace_log>
```

## query_masking_rules {#query-masking-rules}

Regexp-based rules, which will be applied to queries as well as all log messages before storing them in server logs, the `system.query_log`, `system.text_log`, `system.processes` tables, and in logs sent to the client. That allows preventing sensitive data leakage from SQL queries (like names, emails, personal identifiers or credit card numbers) to logs.

**Example**

``` xml
<query_masking_rules>
    <rule>
        <name>hide SSN</name>
        <regexp>(^|\D)\d{3}-\d{2}-\d{4}($|\D)</regexp>
        <replace>000-00-0000</replace>
    </rule>
</query_masking_rules>
```

Config fields:
- `name` - name for the rule (optional)
- `regexp` - RE2 compatible regular expression (mandatory)
- `replace` - substitution string for sensitive data (optional, by default - six asterisks)

The masking rules are applied to the whole query (to prevent leaks of sensitive data from malformed / non-parsable queries).

The `system.events` table has the counter `QueryMaskingRulesMatch`, which has an overall number of query masking rules matches.

For distributed queries each server has to be configured separately, otherwise subqueries passed to other nodes will be stored without masking.

## remote_servers {#server-settings-remote-servers}

Configuration of clusters used by the [Distributed](../../engines/table-engines/special/distributed.md) table engine and by the `cluster` table function.

**Example**

``` xml
<remote_servers incl="clickhouse_remote_servers" />
```

For the value of the `incl` attribute, see the section “[Configuration files](../configuration-files.md#configuration_files)”.

**See Also**

- [skip_unavailable_shards](../settings/settings.md#settings-skip_unavailable_shards)

## timezone {#server_configuration_parameters-timezone}

The server's time zone.

Specified as an IANA identifier for the UTC timezone or geographic location (for example, Africa/Abidjan).

The time zone is necessary for conversions between String and DateTime formats when DateTime fields are output to text format (printed on the screen or in a file), and when getting DateTime from a string. Besides, the time zone is used in functions that work with the time and date if they didn't receive the time zone in the input parameters.

**Example**

``` xml
<timezone>Europe/Moscow</timezone>
```

## tcp_port {#server_configuration_parameters-tcp_port}

Port for communicating with clients over the TCP protocol.

**Example**

``` xml
<tcp_port>9000</tcp_port>
```

## tcp_port_secure {#server_configuration_parameters-tcp_port_secure}

TCP port for secure communication with clients. Use it with the [OpenSSL](#server_configuration_parameters-openssl) settings.

**Possible values**

Positive integer.

**Default value**

``` xml
<tcp_port_secure>9440</tcp_port_secure>
```

## mysql_port {#server_configuration_parameters-mysql_port}

Port for communicating with clients over the MySQL protocol.

**Possible values**

Positive integer.

Example

``` xml
<mysql_port>9004</mysql_port>
```

## tmp_path {#server-settings-tmp_path}

Path to temporary data for processing large queries.

!!! note "Note"
    The trailing slash is mandatory.
**Example**

``` xml
<tmp_path>/var/lib/clickhouse/tmp/</tmp_path>
```

## tmp_policy {#server-settings-tmp-policy}

Policy from [`storage_configuration`](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes) to store temporary files.
If not set, [`tmp_path`](#server-settings-tmp_path) is used, otherwise it is ignored.

!!! note "Note"
    - `move_factor` is ignored
    - `keep_free_space_bytes` is ignored
    - `max_data_part_size_bytes` is ignored
    - you must have exactly one volume in that policy

## uncompressed_cache_size {#server-settings-uncompressed_cache_size}

Cache size (in bytes) for uncompressed data used by table engines from the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family.

There is one shared cache for the server. Memory is allocated on demand. The cache is used if the option [use_uncompressed_cache](../settings/settings.md#setting-use_uncompressed_cache) is enabled.

The uncompressed cache is advantageous for very short queries in individual cases.

**Example**

``` xml
<uncompressed_cache_size>8589934592</uncompressed_cache_size>
```

## user_files_path {#server_configuration_parameters-user_files_path}

The directory with user files. Used in the table function [file()](../../sql-reference/table-functions/file.md).

**Example**

``` xml
<user_files_path>/var/lib/clickhouse/user_files/</user_files_path>
```

## users_config {#users-config}

Path to the file that contains:

- User configurations.
- Access rights.
- Settings profiles.
- Quota settings.

**Example**

``` xml
<users_config>users.xml</users_config>
```

## zookeeper {#server-settings_zookeeper}

Contains settings that allow ClickHouse to interact with a [ZooKeeper](http://zookeeper.apache.org/) cluster.

ClickHouse uses ZooKeeper for storing metadata of replicas when using replicated tables. If replicated tables are not used, this section of parameters can be omitted.

This section contains the following parameters:

- `node` — ZooKeeper endpoint. You can set multiple endpoints.

    For example:

    ``` xml
    <node index="1">
        <host>example_host</host>
        <port>2181</port>
    </node>
    ```

    The `index` attribute specifies the node order when trying to connect to the ZooKeeper cluster.

- `session_timeout` — Maximum timeout for the client session in milliseconds.
- `root` — The [znode](http://zookeeper.apache.org/doc/r3.5.5/zookeeperOver.html#Nodes+and+ephemeral+nodes) that is used as the root for znodes used by the ClickHouse server. Optional.
- `identity` — User and password, that can be required by ZooKeeper to give access to requested znodes. Optional.

**Example configuration**

``` xml
<zookeeper>
    <node>
        <host>example1</host>
        <port>2181</port>
    </node>
    <node>
        <host>example2</host>
        <port>2181</port>
    </node>
    <session_timeout_ms>30000</session_timeout_ms>
    <operation_timeout_ms>10000</operation_timeout_ms>
    <!-- Optional. Chroot suffix. Should exist. -->
    <root>/path/to/zookeeper/node</root>
    <!-- Optional. ZooKeeper digest ACL string. -->
    <identity>user:password</identity>
</zookeeper>
```

**See Also**

- [Replication](../../engines/table-engines/mergetree-family/replication.md)
- [ZooKeeper Programmer's Guide](http://zookeeper.apache.org/doc/current/zookeeperProgrammers.html)
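Once this section is in place, connectivity can be checked from SQL. A minimal sketch (it assumes the server is actually configured with ZooKeeper and that a `/clickhouse` znode exists; otherwise the query fails):

``` sql
-- system.zookeeper requires an explicit path filter; it lists the children of that znode.
SELECT name, value
FROM system.zookeeper
WHERE path = '/clickhouse';
```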
## use_minimalistic_part_header_in_zookeeper {#server-settings-use_minimalistic_part_header_in_zookeeper}

Storage method for data part headers in ZooKeeper.

This setting only applies to the `MergeTree` family. It can be specified:

- Globally in the [merge_tree](#server_configuration_parameters-merge_tree) section of the `config.xml` file.

    ClickHouse uses the setting for all the tables on the server. You can change the setting at any time. Existing tables change their behaviour when the setting changes.

- For each table.

    When creating a table, specify the corresponding [engine setting](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table). The behaviour of an existing table with this setting does not change, even if the global setting changes.

**Possible values**

- 0 — Functionality is turned off.
- 1 — Functionality is turned on.

If `use_minimalistic_part_header_in_zookeeper = 1`, then [replicated](../../engines/table-engines/mergetree-family/replication.md) tables store the headers of the data parts compactly using a single `znode`. If the table contains many columns, this storage method significantly reduces the volume of the data stored in ZooKeeper.

!!! attention "Attention"
    After applying `use_minimalistic_part_header_in_zookeeper = 1`, you can't downgrade the ClickHouse server to a version that doesn't support this setting. Be careful when upgrading ClickHouse on servers in a cluster. Don't upgrade all the servers at once. It is safer to test new versions of ClickHouse in a test environment, or on just a few servers of a cluster.

    Data part headers already stored with this setting can't be restored to their previous (non-compact) representation.

**Default value:** 0.

## disable_internal_dns_cache {#server-settings-disable-internal-dns-cache}

Disables the internal DNS cache. Recommended for operating ClickHouse in systems with frequently changing infrastructure such as Kubernetes.

**Default value:** 0.

## dns_cache_update_period {#server-settings-dns-cache-update-period}

The period of updating IP addresses stored in the ClickHouse internal DNS cache (in seconds).
The update is performed asynchronously, in a separate system thread.

**Default value**: 15.

## access_control_path {#access_control_path}

Path to a folder where a ClickHouse server stores user and role configurations created by SQL commands.

Default value: `/var/lib/clickhouse/access/`.

**See also**

- [Access Control and Account Management](../access-rights.md#access-control)
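The entries stored under this folder are produced by the SQL-driven access control commands. A minimal sketch of what creates them (the user name and password are placeholders, not recommendations):

``` sql
-- Both objects end up as files under access_control_path.
CREATE USER IF NOT EXISTS report_user IDENTIFIED WITH sha256_password BY 'ChangeMe!42';
GRANT SELECT ON default.* TO report_user;
```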
[Original article](https://clickhouse.tech/docs/en/operations/server_configuration_parameters/settings/)

diff --git a/docs/es/operations/settings/constraints-on-settings.md b/docs/es/operations/settings/constraints-on-settings.md
deleted file mode 100644
index fe385f6ddbb..00000000000
--- a/docs/es/operations/settings/constraints-on-settings.md
+++ /dev/null
@@ -1,75 +0,0 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 62
toc_title: "Constraints on settings"
---

# Constraints on Settings {#constraints-on-settings}

Constraints on settings can be defined in the `profiles` section of the `user.xml` configuration file and prohibit users from changing some of the settings with the `SET` query.
The constraints are defined as the following:

``` xml
<profiles>
  <user_name>
    <constraints>
      <setting_name_1>
        <min>lower_boundary</min>
      </setting_name_1>
      <setting_name_2>
        <max>upper_boundary</max>
      </setting_name_2>
      <setting_name_3>
        <min>lower_boundary</min>
        <max>upper_boundary</max>
      </setting_name_3>
      <setting_name_4>
        <readonly/>
      </setting_name_4>
    </constraints>
  </user_name>
</profiles>
```

If the user tries to violate the constraints, an exception is thrown and the setting isn't changed.
Three types of constraints are supported: `min`, `max`, `readonly`. The `min` and `max` constraints specify upper and lower boundaries for a numeric setting and can be used in combination. The `readonly` constraint specifies that the user cannot change the corresponding setting at all.

**Example:** Let `users.xml` include these lines:

``` xml
<profiles>
  <default>
    <max_memory_usage>10000000000</max_memory_usage>
    <force_index_by_date>0</force_index_by_date>
    ...
    <constraints>
      <max_memory_usage>
        <min>5000000000</min>
        <max>20000000000</max>
      </max_memory_usage>
      <force_index_by_date>
        <readonly/>
      </force_index_by_date>
    </constraints>
  </default>
</profiles>
```

The following queries all throw exceptions:

``` sql
SET max_memory_usage=20000000001;
SET max_memory_usage=4999999999;
SET force_index_by_date=1;
```

``` text
Code: 452, e.displayText() = DB::Exception: Setting max_memory_usage should not be greater than 20000000000.
Code: 452, e.displayText() = DB::Exception: Setting max_memory_usage should not be less than 5000000000.
Code: 452, e.displayText() = DB::Exception: Setting force_index_by_date should not be changed.
```

**Note:** the `default` profile has special handling: all the constraints defined for the `default` profile become the default constraints, so they restrict all the users until they're overridden explicitly for these users.

[Original article](https://clickhouse.tech/docs/en/operations/settings/constraints_on_settings/)

diff --git a/docs/es/operations/settings/index.md b/docs/es/operations/settings/index.md
deleted file mode 100644
index 37aab0a7e1b..00000000000
--- a/docs/es/operations/settings/index.md
+++ /dev/null
@@ -1,33 +0,0 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_folder_title: "Settings"
toc_priority: 55
toc_title: "Introduction"
---

# Settings {#session-settings-intro}

There are multiple ways to make all the settings described in this section of documentation.

Settings are configured in layers, so each subsequent layer redefines the previous settings.

Ways to configure settings, in order of priority:

- Settings in the `users.xml` server configuration file.

    Set in the element `<profiles>`.

- Session settings.

    Send `SET setting=value` from the ClickHouse console client in interactive mode.
    Similarly, you can use ClickHouse sessions in the HTTP protocol. To do this, you need to specify the `session_id` HTTP parameter.

- Query settings.

    - When starting the ClickHouse console client in non-interactive mode, set the startup parameter `--setting=value`.
    - When using the HTTP API, pass CGI parameters (`URL?setting_1=value&setting_2=value...`).

Settings that can only be made in the server config file are not covered in this section.
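To make the layering tangible, here is a small sketch (the setting and values are arbitrary examples): a profile value from `users.xml` holds until the session overrides it, and a `SETTINGS` clause overrides both for a single query:

``` sql
-- Session level: overrides the profile value from users.xml.
SET max_threads = 4;
-- Inspect the effective value.
SELECT name, value FROM system.settings WHERE name = 'max_threads';
-- Query level: the SETTINGS clause wins for this query only.
SELECT count() FROM system.one SETTINGS max_threads = 8;
```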
[Original article](https://clickhouse.tech/docs/en/operations/settings/)

diff --git a/docs/es/operations/settings/permissions-for-queries.md b/docs/es/operations/settings/permissions-for-queries.md
deleted file mode 100644
index f9f669b876e..00000000000
--- a/docs/es/operations/settings/permissions-for-queries.md
+++ /dev/null
@@ -1,61 +0,0 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 58
toc_title: "Permissions for queries"
---

# Permissions for Queries {#permissions_for_queries}

Queries in ClickHouse can be divided into several types:

1. Read data queries: `SELECT`, `SHOW`, `DESCRIBE`, `EXISTS`.
2. Write data queries: `INSERT`, `OPTIMIZE`.
3. Change settings queries: `SET`, `USE`.
4. [DDL](https://en.wikipedia.org/wiki/Data_definition_language) queries: `CREATE`, `ALTER`, `RENAME`, `ATTACH`, `DETACH`, `DROP`, `TRUNCATE`.
5. `KILL QUERY`.

The following settings regulate user permissions by the type of query:

- [readonly](#settings_readonly) — Restricts permissions for all types of queries except DDL queries.
- [allow_ddl](#settings_allow_ddl) — Restricts permissions for DDL queries.

`KILL QUERY` can be performed with any settings.

## readonly {#settings_readonly}

Restricts permissions for read data, write data, and change settings queries.

See how the queries are divided into types [above](#permissions_for_queries).

Possible values:

- 0 — All queries are allowed.
- 1 — Only read data queries are allowed.
- 2 — Read data and change settings queries are allowed.

After setting `readonly = 1`, the user can't change the `readonly` and `allow_ddl` settings in the current session.

When using the `GET` method in the [HTTP interface](../../interfaces/http.md), `readonly = 1` is set automatically. To modify data, use the `POST` method.

Setting `readonly = 1` prohibits the user from changing all the settings. There is a way to prohibit the user from changing only specific settings; for details see [constraints on settings](constraints-on-settings.md).

Default value: 0

## allow_ddl {#settings_allow_ddl}

Allows or denies [DDL](https://en.wikipedia.org/wiki/Data_definition_language) queries.

See how the queries are divided into types [above](#permissions_for_queries).

Possible values:

- 0 — DDL queries are not allowed.
- 1 — DDL queries are allowed.

You can't execute `SET allow_ddl = 1` if `allow_ddl = 0` for the current session.

Default value: 1
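A quick sketch of the `readonly` behaviour in an interactive session (the table name `t` is hypothetical, and the exact error texts vary between versions):

``` sql
SET readonly = 1;
SELECT 1;                    -- allowed: a read data query
INSERT INTO t VALUES (1);    -- rejected: writing is not allowed in readonly mode
SET readonly = 0;            -- also rejected: readonly itself can't be changed anymore
```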
[Original article](https://clickhouse.tech/docs/en/operations/settings/permissions_for_queries/)

diff --git a/docs/es/operations/settings/query-complexity.md b/docs/es/operations/settings/query-complexity.md
deleted file mode 100644
index 82bc235c30d..00000000000
--- a/docs/es/operations/settings/query-complexity.md
+++ /dev/null
@@ -1,300 +0,0 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 59
toc_title: "Restrictions on query complexity"
---

# Restrictions on Query Complexity {#restrictions-on-query-complexity}

Restrictions on query complexity are part of the settings.
They are used to provide safer execution from the user interface.
Almost all the restrictions only apply to `SELECT`. For distributed query processing, restrictions are applied on each server separately.

ClickHouse checks the restrictions for data parts, not for each row. It means that you can exceed the value of a restriction by the size of the data part.

Restrictions on the “maximum amount of something” can take the value 0, which means “unrestricted”.
Most restrictions also have an ‘overflow_mode’ setting, meaning what to do when the limit is exceeded.
It can take one of two values: `throw` or `break`. Restrictions on aggregation (group_by_overflow_mode) also have the value `any`.

`throw` – Throw an exception (default).

`break` – Stop executing the query and return the partial result, as if the source data ran out.

`any (only for group_by_overflow_mode)` – Continuing aggregation for the keys that got into the set, but don't add new keys to the set.

## max_memory_usage {#settings_max_memory_usage}

The maximum amount of RAM to use for running a query on a single server.

In the default configuration file, the maximum is 10 GB.

The setting doesn't consider the volume of available memory or the total volume of memory on the machine.
The restriction applies to a single query within a single server.
You can use `SHOW PROCESSLIST` to see the current memory consumption for each query.
Besides, the peak memory consumption is tracked for each query and written to the log.

Memory usage is not monitored for the states of certain aggregate functions.

Memory usage is not fully tracked for states of the aggregate functions `min`, `max`, `any`, `anyLast`, `argMin`, `argMax` from `String` and `Array` arguments.

Memory consumption is also restricted by the parameters `max_memory_usage_for_user` and `max_memory_usage_for_all_queries`.

## max_memory_usage_for_user {#max-memory-usage-for-user}

The maximum amount of RAM to use for running a user's queries on a single server.

Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Core/Settings.h#L288). By default, the amount is not restricted (`max_memory_usage_for_user = 0`).

See also the description of [max_memory_usage](#settings_max_memory_usage).

## max_memory_usage_for_all_queries {#max-memory-usage-for-all-queries}

The maximum amount of RAM to use for running all queries on a single server.

Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Core/Settings.h#L289). By default, the amount is not restricted (`max_memory_usage_for_all_queries = 0`).

See also the description of [max_memory_usage](#settings_max_memory_usage).

## max_rows_to_read {#max-rows-to-read}

The following restrictions can be checked on each block (instead of on each row). That is, the restrictions can be broken a little.

A maximum number of rows that can be read from a table when running a query.

## max_bytes_to_read {#max-bytes-to-read}

A maximum number of bytes (uncompressed data) that can be read from a table when running a query.

## read_overflow_mode {#read-overflow-mode}

What to do when the volume of data read exceeds one of the limits: ‘throw’ or ‘break’. By default, throw.

## max_rows_to_group_by {#settings-max-rows-to-group-by}

A maximum number of unique keys received from aggregation. This setting lets you limit memory consumption when aggregating.

## group_by_overflow_mode {#group-by-overflow-mode}

What to do when the number of unique keys for aggregation exceeds the limit: ‘throw’, ‘break’, or ‘any’. By default, throw.
Using the ‘any’ value lets you run an approximation of GROUP BY. The quality of this approximation depends on the statistical nature of the data.
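For instance, an approximate GROUP BY can be sketched like this (the threshold and data are arbitrary):

``` sql
SET max_rows_to_group_by = 100000, group_by_overflow_mode = 'any';
-- Once ~100000 unique keys are collected, aggregation continues only for those keys;
-- rows with new keys are silently dropped instead of failing the query.
SELECT number % 1000000 AS k, count()
FROM numbers(10000000)
GROUP BY k;
```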
## max_bytes_before_external_group_by {#settings-max_bytes_before_external_group_by}

Enables or disables execution of `GROUP BY` clauses in external memory. See [GROUP BY in external memory](../../sql-reference/statements/select/group-by.md#select-group-by-in-external-memory).

Possible values:

- Maximum volume of RAM (in bytes) that can be used by the single [GROUP BY](../../sql-reference/statements/select/group-by.md#select-group-by-clause) operation.
- 0 — `GROUP BY` in external memory disabled.

Default value: 0.

## max_rows_to_sort {#max-rows-to-sort}

A maximum number of rows before sorting. This allows you to limit memory consumption when sorting.

## max_bytes_to_sort {#max-bytes-to-sort}

A maximum number of bytes before sorting.

## sort_overflow_mode {#sort-overflow-mode}

What to do if the number of rows received before sorting exceeds one of the limits: ‘throw’ or ‘break’. By default, throw.

## max_result_rows {#setting-max_result_rows}

Limit on the number of rows in the result. Also checked for subqueries, and on remote servers when running parts of a distributed query.

## max_result_bytes {#max-result-bytes}

Limit on the number of bytes in the result. The same as the previous setting.

## result_overflow_mode {#result-overflow-mode}

What to do if the volume of the result exceeds one of the limits: ‘throw’ or ‘break’. By default, throw.

Using ‘break’ is similar to using LIMIT. `Break` interrupts execution only at the block level. This means that the amount of returned rows is greater than [max_result_rows](#setting-max_result_rows), is a multiple of [max_block_size](settings.md#setting-max_block_size), and depends on [max_threads](settings.md#settings-max_threads).

Example:

``` sql
SET max_threads = 3, max_block_size = 3333;
SET max_result_rows = 3334, result_overflow_mode = 'break';

SELECT *
FROM numbers_mt(100000)
FORMAT Null;
```

Result:

``` text
6666 rows in set. ...
```

## max_execution_time {#max-execution-time}

Maximum query execution time in seconds.
At this time, it is not checked for one of the sorting stages, or when merging and finalizing aggregate functions.

## timeout_overflow_mode {#timeout-overflow-mode}

What to do if the query is run longer than ‘max_execution_time’: ‘throw’ or ‘break’. By default, throw.

## min_execution_speed {#min-execution-speed}

Minimal execution speed in rows per second. Checked on every data block when ‘timeout_before_checking_execution_speed’ expires. If the execution speed is lower, an exception is thrown.

## min_execution_speed_bytes {#min-execution-speed-bytes}

A minimum number of execution bytes per second. Checked on every data block when ‘timeout_before_checking_execution_speed’ expires. If the execution speed is lower, an exception is thrown.

## max_execution_speed {#max-execution-speed}

A maximum number of execution rows per second. Checked on every data block when ‘timeout_before_checking_execution_speed’ expires. If the execution speed is high, the execution speed will be reduced.

## max_execution_speed_bytes {#max-execution-speed-bytes}

A maximum number of execution bytes per second. Checked on every data block when ‘timeout_before_checking_execution_speed’ expires. If the execution speed is high, the execution speed will be reduced.
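A combined sketch of the speed limits (the values are arbitrary; `numbers()` just provides a long-running read):

``` sql
-- After a 5-second grace period: abort if slower than 1M rows/s,
-- and throttle anything faster than 100M rows/s.
SET min_execution_speed = 1000000,
    max_execution_speed = 100000000,
    timeout_before_checking_execution_speed = 5;
SELECT count() FROM numbers(1000000000);
```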
## timeout_before_checking_execution_speed {#timeout-before-checking-execution-speed}

Checks that execution speed is not too slow (no less than ‘min_execution_speed’), after the specified time in seconds has expired.

## max_columns_to_read {#max-columns-to-read}

A maximum number of columns that can be read from a table in a single query. If a query requires reading a greater number of columns, it throws an exception.

## max_temporary_columns {#max-temporary-columns}

A maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns. If there are more temporary columns than this, it throws an exception.

## max_temporary_non_const_columns {#max-temporary-non-const-columns}

The same thing as ‘max_temporary_columns’, but without counting constant columns.
Note that constant columns are formed fairly often when running a query, but they require approximately zero computing resources.

## max_subquery_depth {#max-subquery-depth}

Maximum nesting depth of subqueries. If subqueries are deeper, an exception is thrown. By default, 100.

## max_pipeline_depth {#max-pipeline-depth}

Maximum pipeline depth. Corresponds to the number of transformations that each data block goes through during query processing. Counted within the limits of a single server. If the pipeline depth is greater, an exception is thrown. By default, 1000.

## max_ast_depth {#max-ast-depth}

Maximum nesting depth of a query syntactic tree. If exceeded, an exception is thrown.
At this time, it isn't checked during parsing, but only after parsing the query. That is, a syntactic tree that is too deep can be created during parsing, but the query will fail. By default, 1000.

## max_ast_elements {#max-ast-elements}

A maximum number of elements in a query syntactic tree. If exceeded, an exception is thrown.
In the same way as the previous setting, it is checked only after parsing the query. By default, 50,000.

## max_rows_in_set {#max-rows-in-set}

A maximum number of rows for a data set in the IN clause created from a subquery.

## max_bytes_in_set {#max-bytes-in-set}

A maximum number of bytes (uncompressed data) used by a set in the IN clause created from a subquery.

## set_overflow_mode {#set-overflow-mode}

What to do when the amount of data exceeds one of the limits: ‘throw’ or ‘break’. By default, throw.

## max_rows_in_distinct {#max-rows-in-distinct}

A maximum number of different rows when using DISTINCT.

## max_bytes_in_distinct {#max-bytes-in-distinct}

A maximum number of bytes used by a hash table when using DISTINCT.

## distinct_overflow_mode {#distinct-overflow-mode}

What to do when the amount of data exceeds one of the limits: ‘throw’ or ‘break’. By default, throw.

## max_rows_to_transfer {#max-rows-to-transfer}

A maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
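A sketch of where this limit applies (the table names are illustrative and assumed to exist):

``` sql
SET max_rows_to_transfer = 1000000, transfer_overflow_mode = 'throw';
-- The subquery result is materialized in a temporary table and shipped to every
-- remote server; above 1M rows the query throws instead of transferring it.
SELECT count()
FROM distributed_table
WHERE user_id GLOBAL IN (SELECT user_id FROM local_visits);
```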
-
-## max_bytes_to_transfer {#max-bytes-to-transfer}
-
-A maximum number of bytes (of uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
-
-## transfer_overflow_mode {#transfer-overflow-mode}
-
-What to do when the amount of data exceeds one of the limits: ‘throw’ or ‘break’. By default, throw.
-
-## max_rows_in_join {#settings-max_rows_in_join}
-
-Limits the number of rows in the hash table that is used when joining tables.
-
-This setting applies to [SELECT … JOIN](../../sql-reference/statements/select/join.md#select-join) operations and the [Join](../../engines/table-engines/special/join.md) table engine.
-
-If a query contains multiple joins, ClickHouse checks this setting for every intermediate result.
-
-ClickHouse can proceed with different actions when the limit is reached. Use the [join_overflow_mode](#settings-join_overflow_mode) setting to choose the action.
-
-Possible values:
-
-- Positive integer.
-- 0 — Unlimited number of rows.
-
-Default value: 0.
-
-## max_bytes_in_join {#settings-max_bytes_in_join}
-
-Limits the size in bytes of the hash table used when joining tables.
-
-This setting applies to [SELECT … JOIN](../../sql-reference/statements/select/join.md#select-join) operations and the [Join table engine](../../engines/table-engines/special/join.md).
-
-If the query contains joins, ClickHouse checks this setting for every intermediate result.
-
-ClickHouse can proceed with different actions when the limit is reached. Use [join_overflow_mode](#settings-join_overflow_mode) to choose the action.
-
-Possible values:
-
-- Positive integer.
-- 0 — Memory control is disabled.
-
-Default value: 0.
-
-## join_overflow_mode {#settings-join_overflow_mode}
-
-Defines what action ClickHouse performs when any of the following join limits is reached:
-
-- [max_bytes_in_join](#settings-max_bytes_in_join)
-- [max_rows_in_join](#settings-max_rows_in_join)
-
-Possible values:
-
-- `THROW` — ClickHouse throws an exception and breaks operation.
-- `BREAK` — ClickHouse breaks operation and doesn't throw an exception.
-
-Default value: `THROW`.
-
-**See also**
-
-- [JOIN clause](../../sql-reference/statements/select/join.md#select-join)
-- [Join table engine](../../engines/table-engines/special/join.md)
-
-## max_partitions_per_insert_block {#max-partitions-per-insert-block}
-
-Limits the maximum number of partitions in a single inserted block.
-
-- Positive integer.
-- 0 — Unlimited number of partitions.
-
-Default value: 100.
-
-**Details**
-
-When inserting data, ClickHouse calculates the number of partitions in the inserted block. If the number of partitions is more than `max_partitions_per_insert_block`, ClickHouse throws an exception with the following text:
-
-> “Too many partitions for single INSERT block (more than” + toString(max_parts) + “). The limit is controlled by ‘max_partitions_per_insert_block’ setting. A large number of partitions is a common misconception. It will lead to severe negative performance impact, including slow server startup, slow INSERT queries and slow SELECT queries. Recommended total number of partitions for a table is under 1000..10000. Please note, that partitioning is not intended to speed up SELECT queries (ORDER BY key is sufficient to make range queries fast). Partitions are intended for data manipulation (DROP PARTITION, etc).”
-
-[Original article](https://clickhouse.tech/docs/en/operations/settings/query_complexity/)
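A small sketch of trading completeness for memory when a join outgrows its hash table, using the limits described above; the threshold and table names are illustrative assumptions:

``` sql
-- Cap the right-hand hash table at ~10M rows; return a partial join
-- result instead of failing when the cap is reached.
SET max_rows_in_join = 10000000, join_overflow_mode = 'break';

SELECT l.id, r.value
FROM left_table AS l
INNER JOIN right_table AS r USING (id);
```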
diff --git a/docs/es/operations/settings/settings-profiles.md b/docs/es/operations/settings/settings-profiles.md
deleted file mode 100644
index 3d96a2c8fba..00000000000
--- a/docs/es/operations/settings/settings-profiles.md
+++ /dev/null
@@ -1,81 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 61
-toc_title: "Perfiles de configuraci\xF3n"
----
-
-# Settings Profiles {#settings-profiles}
-
-A settings profile is a collection of settings grouped under the same name.
-
-!!! note "Information"
-    ClickHouse also supports [SQL-driven workflow](../access-rights.md#access-control) for managing settings profiles. We recommend using it.
-
-A profile can have any name. You can specify the same profile for different users. The most important thing you can write in a settings profile is `readonly=1`, which ensures read-only access.
-
-Settings profiles can inherit from each other. To use inheritance, indicate one or multiple `profile` settings before the other settings that are listed in the profile. In case one setting is defined in different profiles, the latest defined is used.
-
-To apply all the settings in a profile, set the `profile` setting.
-
-Example:
-
-Install the `web` profile.
-
-``` sql
-SET profile = 'web'
-```
-
-Settings profiles are declared in the user config file. This is usually `users.xml`.
-
-Example:
-
-``` xml
-<!-- Settings profiles -->
-<profiles>
-    <!-- Default settings -->
-    <default>
-        <!-- The maximum number of threads when running a single query. -->
-        <max_threads>8</max_threads>
-    </default>
-
-    <!-- Settings for queries from the user interface -->
-    <web>
-        <max_rows_to_read>1000000000</max_rows_to_read>
-        <max_bytes_to_read>100000000000</max_bytes_to_read>
-
-        <max_rows_to_group_by>1000000</max_rows_to_group_by>
-        <group_by_overflow_mode>any</group_by_overflow_mode>
-
-        <max_rows_to_sort>1000000</max_rows_to_sort>
-        <max_bytes_to_sort>1000000000</max_bytes_to_sort>
-
-        <max_result_rows>100000</max_result_rows>
-        <max_result_bytes>100000000</max_result_bytes>
-        <result_overflow_mode>break</result_overflow_mode>
-
-        <max_execution_time>600</max_execution_time>
-        <min_execution_speed>1000000</min_execution_speed>
-        <timeout_before_checking_execution_speed>15</timeout_before_checking_execution_speed>
-
-        <max_columns_to_read>25</max_columns_to_read>
-        <max_temporary_columns>100</max_temporary_columns>
-        <max_temporary_non_const_columns>50</max_temporary_non_const_columns>
-
-        <max_subquery_depth>2</max_subquery_depth>
-        <max_pipeline_depth>25</max_pipeline_depth>
-        <max_ast_depth>50</max_ast_depth>
-        <max_ast_elements>100</max_ast_elements>
-
-        <readonly>1</readonly>
-    </web>
-</profiles>
-```
-
-The example specifies two profiles: `default` and `web`.
-
-The `default` profile has a special purpose: it must always be present and is applied when starting the server. In other words, the `default` profile contains default settings.
-
-The `web` profile is a regular profile that can be set using the `SET` query or using a URL parameter in an HTTP query.
-
-[Original article](https://clickhouse.tech/docs/en/operations/settings/settings_profiles/)
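One way to verify what a profile actually changed in the current session; the `web` profile is the one from the example above, and `system.settings` is queried through its standard `changed` column:

``` sql
SET profile = 'web';

-- List only the settings the profile (or the session) has overridden.
SELECT name, value
FROM system.settings
WHERE changed;
```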
diff --git a/docs/es/operations/settings/settings-users.md b/docs/es/operations/settings/settings-users.md
deleted file mode 100644
index 1c1ac7914f0..00000000000
--- a/docs/es/operations/settings/settings-users.md
+++ /dev/null
@@ -1,164 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 63
-toc_title: "Configuraci\xF3n del usuario"
----
-
-# User Settings {#user-settings}
-
-The `users` section of the `users.xml` configuration file contains user settings.
-
-!!! note "Information"
-    ClickHouse also supports [SQL-driven workflow](../access-rights.md#access-control) for managing users. We recommend using it.
-
-Structure of the `users` section:
-
-``` xml
-<users>
-    <!-- If user name was not specified, 'default' user is used. -->
-    <user_name>
-        <password></password>
-        <!-- Or -->
-        <password_sha256_hex></password_sha256_hex>
-
-        <access_management>0|1</access_management>
-
-        <networks incl="networks" replace="replace">
-        </networks>
-
-        <profile>profile_name</profile>
-
-        <quota>default</quota>
-
-        <databases>
-            <database_name>
-                <table_name>
-                    <filter>expression</filter>
-                </table_name>
-            </database_name>
-        </databases>
-    </user_name>
-    <!-- Other users settings -->
-</users>
-```
-
-### user_name/password {#user-namepassword}
-
-The password can be specified in plaintext or in SHA256 (hex format).
-
-- To assign a password in plaintext (**not recommended**), place it in a `password` element.
-
-    For example, `<password>qwerty</password>`. The password can be left blank.
-
-- To assign a password using its SHA256 hash, place it in a `password_sha256_hex` element.
-
-    For example, `<password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>`.
-
-    Example of how to generate a password from shell:
-
-        PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
-
-    The first line of the result is the password. The second line is the corresponding SHA256 hash.
-
-- For compatibility with MySQL clients, the password can be specified as a double SHA1 hash. Place it in a `password_double_sha1_hex` element.
-
-    For example, `<password_double_sha1_hex>08b4a0f1de6ad37da17359e592c8d74788a83eb0</password_double_sha1_hex>`.
-
-    Example of how to generate a password from shell:
-
-        PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
-
-    The first line of the result is the password. The second line is the corresponding double SHA1 hash.
-
-### access_management {#access_management-user-setting}
-
-This setting enables or disables using of SQL-driven [access control and account management](../access-rights.md#access-control) for the user.
-
-Possible values:
-
-- 0 — Disabled.
-- 1 — Enabled.
-
-Default value: 0.
-
-### user_name/networks {#user-namenetworks}
-
-List of networks from which the user can connect to the ClickHouse server.
-
-Each element of the list can have one of the following forms:
-
-- `<ip>` — IP address or network mask.
-
-    Examples: `213.180.204.3`, `10.0.0.1/8`, `10.0.0.1/255.255.255.0`, `2a02:6b8::3`, `2a02:6b8::3/64`, `2a02:6b8::3/ffff:ffff:ffff:ffff::`.
-
-- `<host>` — Hostname.
-
-    Example: `example01.host.ru`.
-
-    To check access, a DNS query is performed, and all returned IP addresses are compared to the peer address.
-
-- `<host_regexp>` — Regular expression for hostnames.
-
-    Example, `^example\d\d-\d\d-\d\.host\.ru$`
-
-    To check access, a [DNS PTR query](https://en.wikipedia.org/wiki/Reverse_DNS_lookup) is performed for the peer address and then the specified regexp is applied. Then, another DNS query is performed for the results of the PTR query and all the received addresses are compared to the peer address. We strongly recommend that the regexp ends with $.
-
-All results of DNS requests are cached until the server restarts.
-
-**Examples**
-
-To open access for user from any network, specify:
-
-``` xml
-<ip>::/0</ip>
-```
-
-!!! warning "Warning"
-    It's insecure to open access from any network unless you have a firewall properly configured or the server is not directly connected to Internet.
-
-To open access only from localhost, specify:
-
-``` xml
-<ip>::1</ip>
-<ip>127.0.0.1</ip>
-```
-
-### user_name/profile {#user-nameprofile}
-
-You can assign a settings profile for the user. Settings profiles are configured in a separate section of the `users.xml` file. For more information, see [Settings Profiles](settings-profiles.md).
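Since the note above recommends the SQL-driven workflow, here is a rough sketch of an equivalent user definition as a single statement; the name, password, network, and profile are illustrative, and the exact `SETTINGS PROFILE` clause is assumed from the `CREATE USER` grammar:

``` sql
CREATE USER web_user
    IDENTIFIED WITH sha256_password BY 'qwerty' -- hashed server-side
    HOST IP '10.0.0.0/8'                        -- like <networks><ip>
    SETTINGS PROFILE 'web';                     -- like <profile>web</profile>
```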
-
-### user_name/quota {#user-namequota}
-
-Quotas allow you to track or limit resource usage over a period of time. Quotas are configured in the `quotas`
-section of the `users.xml` configuration file.
-
-You can assign a quotas set for the user. For a detailed description of quotas configuration, see [Quotas](../quotas.md#quotas).
-
-### user_name/databases {#user-namedatabases}
-
-In this section, you can limit rows that are returned by ClickHouse for `SELECT` queries made by the current user, thus implementing basic row-level security.
-
-**Example**
-
-The following configuration forces that user `user1` can only see the rows of `table1` as the result of `SELECT` queries, where the value of the `id` field is 1000.
-
-``` xml
-<user1>
-    <databases>
-        <database_name>
-            <table1>
-                <filter>id = 1000</filter>
-            </table1>
-        </database_name>
-    </databases>
-</user1>
-```
-
-The `filter` can be any expression resulting in a [UInt8](../../sql-reference/data-types/int-uint.md)-type value. It usually contains comparisons and logical operators. Rows from `database_name.table1` where filter results to 0 are not returned for this user. The filtering is incompatible with `PREWHERE` operations and disables `WHERE→PREWHERE` optimization.
-
-[Original article](https://clickhouse.tech/docs/en/operations/settings/settings_users/)
diff --git a/docs/es/operations/settings/settings.md b/docs/es/operations/settings/settings.md
deleted file mode 100644
index 62511dd9fc0..00000000000
--- a/docs/es/operations/settings/settings.md
+++ /dev/null
@@ -1,1254 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
----
-
-# Settings {#settings}
-
-## distributed_product_mode {#distributed-product-mode}
-
-Changes the behaviour of [distributed subqueries](../../sql-reference/operators/in.md).
-
-ClickHouse applies this setting when the query contains the product of distributed tables, i.e. when the query for a distributed table contains a non-GLOBAL subquery for the distributed table.
-
-Restrictions:
-
-- Only applied for IN and JOIN subqueries.
-- Only if the FROM section uses a distributed table containing more than one shard.
-- If the subquery concerns a distributed table containing more than one shard.
-- Not used for a table-valued [remote](../../sql-reference/table-functions/remote.md) function.
-
-Possible values:
-
-- `deny` — Default value. Prohibits using these types of subqueries (returns the “Double-distributed in/JOIN subqueries is denied” exception).
-- `local` — Replaces the database and table in the subquery with local ones for the destination server (shard), leaving the normal `IN`/`JOIN.`
-- `global` — Replaces the `IN`/`JOIN` query with `GLOBAL IN`/`GLOBAL JOIN.`
-- `allow` — Allows the use of these types of subqueries.
-
-## enable_optimize_predicate_expression {#enable-optimize-predicate-expression}
-
-Turns on predicate pushdown in `SELECT` queries.
-
-Predicate pushdown may significantly reduce network traffic for distributed queries.
-
-Possible values:
-
-- 0 — Disabled.
-- 1 — Enabled.
-
-Default value: 1.
-
-Usage
-
-Consider the following queries:
-
-1. `SELECT count() FROM test_table WHERE date = '2018-10-10'`
-2. `SELECT count() FROM (SELECT * FROM test_table) WHERE date = '2018-10-10'`
-
-If `enable_optimize_predicate_expression = 1`, then the execution time of these queries is equal because ClickHouse applies `WHERE` to the subquery when processing it.
-
-If `enable_optimize_predicate_expression = 0`, then the execution time of the second query is much longer because the `WHERE` clause is applied to all the data after the subquery finishes.
-
-## fallback_to_stale_replicas_for_distributed_queries {#settings-fallback_to_stale_replicas_for_distributed_queries}
-
-Forces a query to an out-of-date replica if updated data is not available. See [Replication](../../engines/table-engines/mergetree-family/replication.md).
-
-ClickHouse selects the most relevant from the outdated replicas of the table.
-
-Used when performing `SELECT` from a distributed table that points to replicated tables.
-
-By default, 1 (enabled).
-
-## force_index_by_date {#settings-force_index_by_date}
-
-Disables query execution if the index can't be used by date.
-
-Works with tables in the MergeTree family.
-
-If `force_index_by_date=1`, ClickHouse checks whether the query has a date key condition that can be used for restricting data ranges. If there is no suitable condition, it throws an exception. However, it does not check whether the condition reduces the amount of data to read. For example, the condition `Date != ' 2000-01-01 '` is acceptable even when it matches all the data in the table (i.e., running the query requires a full scan). For more information about ranges of data in MergeTree tables, see [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md).
-
-## force_primary_key {#force-primary-key}
-
-Disables query execution if indexing by the primary key is not possible.
-
-Works with tables in the MergeTree family.
-
-If `force_primary_key=1`, ClickHouse checks to see if the query has a primary key condition that can be used for restricting data ranges. If there is no suitable condition, it throws an exception. However, it does not check whether the condition reduces the amount of data to read. For more information about data ranges in MergeTree tables, see [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md).
-
-## format_schema {#format-schema}
-
-This parameter is useful when you are using formats that require a schema definition, such as [Cap'n Proto](https://capnproto.org/) or [Protobuf](https://developers.google.com/protocol-buffers/). The value depends on the format.
-
-## fsync_metadata {#fsync-metadata}
-
-Enables or disables [fsync](http://pubs.opengroup.org/onlinepubs/9699919799/functions/fsync.html) when writing `.sql` files. Enabled by default.
-
-It makes sense to disable it if the server has millions of tiny tables that are constantly being created and destroyed.
-
-## enable_http_compression {#settings-enable_http_compression}
-
-Enables or disables data compression in the response to an HTTP request.
-
-For more information, read the [HTTP interface description](../../interfaces/http.md).
-
-Possible values:
-
-- 0 — Disabled.
-- 1 — Enabled.
-
-Default value: 0.
-
-## http_zlib_compression_level {#settings-http_zlib_compression_level}
-
-Sets the level of data compression in the response to an HTTP request if [enable_http_compression = 1](#settings-enable_http_compression).
-
-Possible values: Numbers from 1 to 9.
-
-Default value: 3.
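A minimal sketch of enabling compressed HTTP responses for a session; level 6 is an illustrative middle ground between speed and ratio, and the client must still send an appropriate `Accept-Encoding` header:

``` sql
SET enable_http_compression = 1;
SET http_zlib_compression_level = 6;
```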
-
-## http_native_compression_disable_checksumming_on_decompress {#settings-http_native_compression_disable_checksumming_on_decompress}
-
-Enables or disables checksum verification when decompressing the HTTP POST data from the client. Used only for the ClickHouse native compression format (not used with `gzip` or `deflate`).
-
-For more information, read the [HTTP interface description](../../interfaces/http.md).
-
-Possible values:
-
-- 0 — Disabled.
-- 1 — Enabled.
-
-Default value: 0.
-
-## send_progress_in_http_headers {#settings-send_progress_in_http_headers}
-
-Enables or disables `X-ClickHouse-Progress` HTTP response headers in `clickhouse-server` responses.
-
-For more information, read the [HTTP interface description](../../interfaces/http.md).
-
-Possible values:
-
-- 0 — Disabled.
-- 1 — Enabled.
-
-Default value: 0.
-
-## max_http_get_redirects {#setting-max_http_get_redirects}
-
-Limits the maximum number of HTTP GET redirect hops for [URL](../../engines/table-engines/special/url.md)-engine tables. The setting applies to both types of tables: those created by the [CREATE TABLE](../../sql-reference/statements/create.md#create-table-query) query and by the [url](../../sql-reference/table-functions/url.md) table function.
-
-Possible values:
-
-- Any positive integer number of hops.
-- 0 — No hops allowed.
-
-Default value: 0.
-
-## input_format_allow_errors_num {#settings-input_format_allow_errors_num}
-
-Sets the maximum number of acceptable errors when reading from text formats (CSV, TSV, etc.).
-
-The default value is 0.
-
-Always pair it with `input_format_allow_errors_ratio`.
-
-If an error occurred while reading rows but the error counter is still less than `input_format_allow_errors_num`, ClickHouse ignores the row and moves on to the next one.
-
-If both `input_format_allow_errors_num` and `input_format_allow_errors_ratio` are exceeded, ClickHouse throws an exception.
-
-## input_format_allow_errors_ratio {#settings-input_format_allow_errors_ratio}
-
-Sets the maximum percentage of errors allowed when reading from text formats (CSV, TSV, etc.).
-The percentage of errors is set as a floating-point number between 0 and 1.
-
-The default value is 0.
-
-Always pair it with `input_format_allow_errors_num`.
-
-If an error occurred while reading rows but the error counter is still less than `input_format_allow_errors_ratio`, ClickHouse ignores the row and moves on to the next one.
-
-If both `input_format_allow_errors_num` and `input_format_allow_errors_ratio` are exceeded, ClickHouse throws an exception.
-
-## input_format_values_interpret_expressions {#settings-input_format_values_interpret_expressions}
-
-Enables or disables the full SQL parser if the fast stream parser can't parse the data. This setting is used only for the [Values](../../interfaces/formats.md#data-format-values) format at the data insertion. For more information about syntax parsing, see the [Syntax](../../sql-reference/syntax.md) section.
-
-Possible values:
-
-- 0 — Disabled.
-
-    In this case, you must provide formatted data. See the [Formats](../../interfaces/formats.md) section.
-
-- 1 — Enabled.
-
-    In this case, you can use an SQL expression as a value, but data insertion is much slower this way.
    If you insert only formatted data, then ClickHouse behaves as if the setting value is 0.
-
-Default value: 1.
-
-Example of Use
-
-Insert the [DateTime](../../sql-reference/data-types/datetime.md) type value with the different settings.
-
-``` sql
-SET input_format_values_interpret_expressions = 0;
-INSERT INTO datetime_t VALUES (now())
-```
-
-``` text
-Exception on client:
-Code: 27. DB::Exception: Cannot parse input: expected ) before: now()): (at row 1)
-```
-
-``` sql
-SET input_format_values_interpret_expressions = 1;
-INSERT INTO datetime_t VALUES (now())
-```
-
-``` text
-Ok.
-```
-
-The last query is equivalent to the following:
-
-``` sql
-SET input_format_values_interpret_expressions = 0;
-INSERT INTO datetime_t SELECT now()
-```
-
-``` text
-Ok.
-```
-
-## input_format_values_deduce_templates_of_expressions {#settings-input_format_values_deduce_templates_of_expressions}
-
-Enables or disables template deduction for SQL expressions in the [Values](../../interfaces/formats.md#data-format-values) format. It allows parsing and interpreting expressions in `Values` much faster if expressions in consecutive rows have the same structure. ClickHouse tries to deduce the template of an expression, parse the following rows using this template and evaluate the expression on a batch of successfully parsed rows.
-
-Possible values:
-
-- 0 — Disabled.
-- 1 — Enabled.
-
-Default value: 1.
-
-For the following query:
-
-``` sql
-INSERT INTO test VALUES (lower('Hello')), (lower('world')), (lower('INSERT')), (upper('Values')), ...
-```
-
-- If `input_format_values_interpret_expressions=1` and `input_format_values_deduce_templates_of_expressions=0`, expressions are interpreted separately for each row (this is very slow for a large number of rows).
-- If `input_format_values_interpret_expressions=0` and `input_format_values_deduce_templates_of_expressions=1`, expressions in the first, second and third rows are parsed using the template `lower(String)` and interpreted together; the expression in the fourth row is parsed with another template (`upper(String)`).
-- If `input_format_values_interpret_expressions=1` and `input_format_values_deduce_templates_of_expressions=1`, the same as in the previous case, but it also allows falling back to interpreting expressions separately if it's not possible to deduce the template.
-
-## input_format_values_accurate_types_of_literals {#settings-input-format-values-accurate-types-of-literals}
-
-This setting is used only when `input_format_values_deduce_templates_of_expressions = 1`. It can happen that expressions for some column have the same structure, but contain numeric literals of different types, e.g.
-
-``` sql
-(..., abs(0), ...),             -- UInt64 literal
-(..., abs(3.141592654), ...),   -- Float64 literal
-(..., abs(-1), ...),            -- Int64 literal
-```
-
-Possible values:
-
-- 0 — Disabled.
-
-    In this case, ClickHouse may use a more general type for some literals (e.g., `Float64` or `Int64` instead of `UInt64` for `42`), but it may cause overflow and precision issues.
-
-- 1 — Enabled.
-
-    In this case, ClickHouse checks the actual type of the literal and uses an expression template of the corresponding type. In some cases, it may significantly slow down expression evaluation in `Values`.
-
-Default value: 1.
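A hedged sketch of the combination recommended above (template deduction with fallback to the full parser); the table `test(s String)` is an assumption for illustration:

``` sql
SET input_format_values_interpret_expressions = 1;
SET input_format_values_deduce_templates_of_expressions = 1;

-- The first two rows share the template lower(String) and are evaluated
-- as one batch; the third row falls back to a second template.
INSERT INTO test VALUES (lower('Hello')), (lower('World')), (upper('values'));
```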
-
-## input_format_defaults_for_omitted_fields {#session_settings-input_format_defaults_for_omitted_fields}
-
-When performing `INSERT` queries, replace omitted input column values with default values of the respective columns. This option only applies to the [JSONEachRow](../../interfaces/formats.md#jsoneachrow), [CSV](../../interfaces/formats.md#csv) and [TabSeparated](../../interfaces/formats.md#tabseparated) formats.
-
-!!! note "Note"
-    When this option is enabled, extended table metadata are sent from server to client. It consumes additional computing resources on the server and can reduce performance.
-
-Possible values:
-
-- 0 — Disabled.
-- 1 — Enabled.
-
-Default value: 1.
-
-## input_format_tsv_empty_as_default {#settings-input-format-tsv-empty-as-default}
-
-When enabled, replace empty input fields in TSV with default values. For complex default expressions `input_format_defaults_for_omitted_fields` must be enabled too.
-
-Disabled by default.
-
-## input_format_null_as_default {#settings-input-format-null-as-default}
-
-Enables or disables using default values if input data contain `NULL`, but the data type of the corresponding column is not `Nullable(T)` (for text input formats).
-
-## input_format_skip_unknown_fields {#settings-input-format-skip-unknown-fields}
-
-Enables or disables skipping insertion of extra data.
-
-When writing data, ClickHouse throws an exception if input data contain columns that do not exist in the target table. If skipping is enabled, ClickHouse doesn't insert extra data and doesn't throw an exception.
-
-Supported formats:
-
-- [JSONEachRow](../../interfaces/formats.md#jsoneachrow)
-- [CSVWithNames](../../interfaces/formats.md#csvwithnames)
-- [TabSeparatedWithNames](../../interfaces/formats.md#tabseparatedwithnames)
-- [TSKV](../../interfaces/formats.md#tskv)
-
-Possible values:
-
-- 0 — Disabled.
-- 1 — Enabled.
-
-Default value: 0.
-
-## input_format_import_nested_json {#settings-input_format_import_nested_json}
-
-Enables or disables inserting of JSON data with nested objects.
-
-Supported formats:
-
-- [JSONEachRow](../../interfaces/formats.md#jsoneachrow)
-
-Possible values:
-
-- 0 — Disabled.
-- 1 — Enabled.
-
-Default value: 0.
-
-See also:
-
-- [Usage of nested structures](../../interfaces/formats.md#jsoneachrow-nested) with the `JSONEachRow` format.
-
-## input_format_with_names_use_header {#settings-input-format-with-names-use-header}
-
-Enables or disables checking the column order when inserting data.
-
-To improve insert performance, we recommend disabling this check if you are sure that the column order of the input data is the same as in the target table.
-
-Supported formats:
-
-- [CSVWithNames](../../interfaces/formats.md#csvwithnames)
-- [TabSeparatedWithNames](../../interfaces/formats.md#tabseparatedwithnames)
-
-Possible values:
-
-- 0 — Disabled.
-- 1 — Enabled.
-
-Default value: 1.
-
-## date_time_input_format {#settings-date_time_input_format}
-
-Allows choosing a parser of the text representation of date and time.
-
-The setting does not apply to [date and time functions](../../sql-reference/functions/date-time-functions.md).
-
-Possible values:
-
-- `'best_effort'` — Enables extended parsing.
-
-    ClickHouse can parse the basic `YYYY-MM-DD HH:MM:SS` format and all [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) date and time formats. For example, `'2018-06-08T01:02:03.000Z'`.
-
-- `'basic'` — Use basic parser.
-
-    ClickHouse can parse only the basic `YYYY-MM-DD HH:MM:SS` format. For example, `'2019-08-20 10:18:56'`.
-
-Default value: `'basic'`.
-
-See also:
-
-- [DateTime data type.](../../sql-reference/data-types/datetime.md)
-- [Functions for working with dates and times.](../../sql-reference/functions/date-time-functions.md)
-
-## join_default_strictness {#settings-join_default_strictness}
-
-Sets default strictness for [JOIN clauses](../../sql-reference/statements/select/join.md#select-join).
-
-Possible values:
-
-- `ALL` — If the right table has several matching rows, ClickHouse creates a [Cartesian product](https://en.wikipedia.org/wiki/Cartesian_product) from matching rows. This is the normal `JOIN` behaviour from standard SQL.
-- `ANY` — If the right table has several matching rows, only the first one found is joined. If the right table has only one matching row, the results of `ANY` and `ALL` are the same.
-- `ASOF` — For joining sequences with an uncertain match.
-- `Empty string` — If `ALL` or `ANY` is not specified in the query, ClickHouse throws an exception.
-
-Default value: `ALL`.
-
-## join_any_take_last_row {#settings-join_any_take_last_row}
-
-Changes behaviour of join operations with `ANY` strictness.
-
-!!! warning "Attention"
-    This setting applies only to `JOIN` operations with [Join](../../engines/table-engines/special/join.md) engine tables.
-
-Possible values:
-
-- 0 — If the right table has more than one matching row, only the first one found is joined.
-- 1 — If the right table has more than one matching row, only the last one found is joined.
-
-Default value: 0.
-
-See also:
-
-- [JOIN clause](../../sql-reference/statements/select/join.md#select-join)
-- [Join table engine](../../engines/table-engines/special/join.md)
-- [join_default_strictness](#settings-join_default_strictness)
-
-## join_use_nulls {#join_use_nulls}
-
-Sets the type of [JOIN](../../sql-reference/statements/select/join.md) behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
-
-Possible values:
-
-- 0 — The empty cells are filled with the default value of the corresponding field type.
-- 1 — `JOIN` behaves the same way as in standard SQL. The type of the corresponding field is converted to [Nullable](../../sql-reference/data-types/nullable.md#data_type-nullable), and empty cells are filled with [NULL](../../sql-reference/syntax.md).
-
-Default value: 0.
-
-## max_block_size {#setting-max_block_size}
-
-In ClickHouse, data is processed by blocks (sets of column parts). The internal processing cycles for a single block are efficient enough, but there are noticeable expenditures on each block. The `max_block_size` setting is a recommendation for what size of block (in a count of rows) to load from tables. The block size shouldn't be too small, so that the expenditures on each block are still noticeable, but not too large so that the query with LIMIT that is completed after the first block is processed quickly.
The goal is to avoid consuming too much memory when extracting a large number of columns in multiple threads and to preserve at least some cache locality.
-
-Default value: 65,536.
-
-Blocks the size of `max_block_size` are not always loaded from the table. If it is obvious that less data needs to be retrieved, a smaller block is processed.
-
-## preferred_block_size_bytes {#preferred-block-size-bytes}
-
-Used for the same purpose as `max_block_size`, but it sets the recommended block size in bytes by adapting it to the number of rows in the block.
-However, the block size cannot be more than `max_block_size` rows.
-By default: 1,000,000. It only works when reading from MergeTree engines.
-
-## merge_tree_min_rows_for_concurrent_read {#setting-merge-tree-min-rows-for-concurrent-read}
-
-If the number of rows to be read from a file of a [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table exceeds `merge_tree_min_rows_for_concurrent_read`, then ClickHouse tries to perform a concurrent reading from this file on several threads.
-
-Possible values:
-
-- Any positive integer.
-
-Default value: 163840.
-
-## merge_tree_min_bytes_for_concurrent_read {#setting-merge-tree-min-bytes-for-concurrent-read}
-
-If the number of bytes to read from one file of a [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md)-engine table exceeds `merge_tree_min_bytes_for_concurrent_read`, then ClickHouse tries to concurrently read from this file in several threads.
-
-Possible value:
-
-- Any positive integer.
-
-Default value: 251658240.
-
-## merge_tree_min_rows_for_seek {#setting-merge-tree-min-rows-for-seek}
-
-If the distance between two data blocks to be read in one file is less than `merge_tree_min_rows_for_seek` rows, then ClickHouse does not seek through the file but reads the data sequentially.
-
-Possible values:
-
-- Any positive integer.
-
-Default value: 0.
-
-## merge_tree_min_bytes_for_seek {#setting-merge-tree-min-bytes-for-seek}
-
-If the distance between two data blocks to be read in one file is less than `merge_tree_min_bytes_for_seek` bytes, then ClickHouse sequentially reads a range of the file that contains both blocks, thus avoiding the extra seek.
-
-Possible values:
-
-- Any positive integer.
-
-Default value: 0.
-
-## merge_tree_coarse_index_granularity {#setting-merge-tree-coarse-index-granularity}
-
-When searching for data, ClickHouse checks the data marks in the index file. If ClickHouse finds that required keys are in some range, it divides this range into `merge_tree_coarse_index_granularity` subranges and searches the required keys there recursively.
-
-Possible values:
-
-- Any positive even integer.
-
-Default value: 8.
-
-## merge_tree_max_rows_to_use_cache {#setting-merge-tree-max-rows-to-use-cache}
-
-If ClickHouse should read more than `merge_tree_max_rows_to_use_cache` rows in one query, it doesn't use the cache of uncompressed blocks.
-
-The cache of uncompressed blocks stores data extracted for queries. ClickHouse uses this cache to speed up responses to repeated small queries. This setting protects the cache from trashing by queries that read a large amount of data.
The [uncompressed_cache_size](../server-configuration-parameters/settings.md#server-settings-uncompressed_cache_size) server setting defines the size of the cache of uncompressed blocks.
-
-Possible values:
-
-- Any positive integer.
-
-Default value: 128 ✕ 8192.
-
-## merge_tree_max_bytes_to_use_cache {#setting-merge-tree-max-bytes-to-use-cache}
-
-If ClickHouse should read more than `merge_tree_max_bytes_to_use_cache` bytes in one query, it doesn't use the cache of uncompressed blocks.
-
-The cache of uncompressed blocks stores data extracted for queries. ClickHouse uses this cache to speed up responses to repeated small queries. This setting protects the cache from trashing by queries that read a large amount of data. The [uncompressed_cache_size](../server-configuration-parameters/settings.md#server-settings-uncompressed_cache_size) server setting defines the size of the cache of uncompressed blocks.
-
-Possible value:
-
-- Any positive integer.
-
-Default value: 2013265920.
-
-## min_bytes_to_use_direct_io {#settings-min-bytes-to-use-direct-io}
-
-The minimum data volume required for using direct I/O access to the storage disk.
-
-ClickHouse uses this setting when reading data from tables. If the total storage volume of all the data to be read exceeds `min_bytes_to_use_direct_io` bytes, then ClickHouse reads the data from the storage disk with the `O_DIRECT` option.
-
-Possible values:
-
-- 0 — Direct I/O is disabled.
-- Positive integer.
-
-Default value: 0.
-
-## log_queries {#settings-log-queries}
-
-Setting up query logging.
-
-Queries sent to ClickHouse with this setup are logged according to the rules in the [query_log](../server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server configuration parameter.
-
-Example:
-
-``` text
-log_queries=1
-```
-
-## log_queries_min_type {#settings-log-queries-min-type}
-
-`query_log` minimal type to log.
-
-Possible values:
-- `QUERY_START` (`=1`)
-- `QUERY_FINISH` (`=2`)
-- `EXCEPTION_BEFORE_START` (`=3`)
-- `EXCEPTION_WHILE_PROCESSING` (`=4`)
-
-Default value: `QUERY_START`.
-
-Can be used to limit which entities go to `query_log`; say you are interested only in errors, then you can use `EXCEPTION_WHILE_PROCESSING`:
-
-``` text
-log_queries_min_type='EXCEPTION_WHILE_PROCESSING'
-```
-
-## log_query_threads {#settings-log-query-threads}
-
-Setting up query threads logging.
-
-Query threads run by ClickHouse with this setup are logged according to the rules in the [query_thread_log](../server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) server configuration parameter.
-
-Example:
-
-``` text
-log_query_threads=1
-```
-
-## max_insert_block_size {#settings-max_insert_block_size}
-
-The size of blocks to form for insertion into a table.
-This setting only applies in cases when the server forms the blocks.
-For example, for an INSERT via the HTTP interface, the server parses the data format and forms blocks of the specified size.
-But when using clickhouse-client, the client parses the data itself, and the ‘max_insert_block_size’ setting on the server does not affect the size of the inserted blocks.
-The setting also does not have a purpose when using INSERT SELECT, since data is inserted using the same blocks that are formed after SELECT.
-
-Default value: 1,048,576.
-
-The default is slightly more than `max_block_size`. The reason for this is because certain table engines (`*MergeTree`) form a data part on the disk for each inserted block, which is a fairly big entity. Similarly, `*MergeTree` tables sort data during insertion, and a large enough block size allows sorting more data in RAM.
-
-## min_insert_block_size_rows {#min-insert-block-size-rows}
-
-Sets the minimum number of rows in the block which can be inserted into a table by an `INSERT` query. Smaller-sized blocks are squashed into bigger ones.
-
-Possible values:
-
-- Positive integer.
-- 0 — Squashing disabled.
-
-Default value: 1048576.
-
-## min_insert_block_size_bytes {#min-insert-block-size-bytes}
-
-Sets the minimum number of bytes in the block which can be inserted into a table by an `INSERT` query. Smaller-sized blocks are squashed into bigger ones.
-
-Possible values:
-
-- Positive integer.
-- 0 — Squashing disabled.
-
-Default value: 268435456.
-
-## max_replica_delay_for_distributed_queries {#settings-max_replica_delay_for_distributed_queries}
-
-Disables lagging replicas for distributed queries. See [Replication](../../engines/table-engines/mergetree-family/replication.md).
-
-Sets the time in seconds. If a replica lags more than the set value, this replica is not used.
-
-Default value: 300.
-
-Used when performing `SELECT` from a distributed table that points to replicated tables.
-
-## max_threads {#settings-max_threads}
-
-The maximum number of query processing threads, excluding threads for retrieving data from remote servers (see the ‘max_distributed_connections’ parameter).
-
-This parameter applies to threads that perform the same stages of the query processing pipeline in parallel.
-For example, when reading from a table, if it is possible to evaluate expressions with functions, filter with WHERE and pre-aggregate for GROUP BY in parallel using at least ‘max_threads’ number of threads, then ‘max_threads’ are used.
-
-Default value: the number of physical CPU cores.
-
-If less than one SELECT query is normally run on a server at a time, set this parameter to a value slightly less than the actual number of processor cores.
-
-For queries that are completed quickly because of a LIMIT, you can set a lower ‘max_threads’. For example, if the necessary number of entries are located in every block and max_threads = 8, then 8 blocks are retrieved, although it would have been enough to read just one.
-
-The smaller the `max_threads` value, the less memory is consumed.
-
-## max_insert_threads {#settings-max-insert-threads}
-
-The maximum number of threads to execute the `INSERT SELECT` query.
-
-Possible values:
-
-- 0 (or 1) — `INSERT SELECT` with no parallel execution.
-- Positive integer. Bigger than 1.
-
-Default value: 0.
-
-Parallel `INSERT SELECT` has effect only if the `SELECT` part is executed in parallel; see the [max_threads](#settings-max_threads) setting.
-Higher values will lead to higher memory usage.
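A minimal sketch of a parallel `INSERT SELECT`; `source_table` and `target_table` are assumptions, and the thread counts are illustrative:

``` sql
-- Let the SELECT stage use 8 threads and the INSERT stage 4; the insert
-- parallelism only takes effect because the SELECT side is parallel.
SET max_threads = 8, max_insert_threads = 4;
INSERT INTO target_table SELECT * FROM source_table;
```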
-
-## max_compress_block_size {#max-compress-block-size}
-
-The maximum size of blocks of uncompressed data before compressing for writing to a table. By default, 1,048,576 (1 MiB). If the size is reduced, the compression rate is significantly reduced, the compression and decompression speed increases slightly due to cache locality, and memory consumption is reduced. There usually isn't any reason to change this setting.
-
-Don't confuse blocks for compression (a chunk of memory consisting of bytes) with blocks for query processing (a set of rows from a table).
-
-## min_compress_block_size {#min-compress-block-size}
-
-For [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables. In order to reduce latency when processing queries, a block is compressed when writing the next mark if its size is at least ‘min_compress_block_size’. By default, 65,536.
-
-The actual size of the block, if the uncompressed data is less than ‘max_compress_block_size’, is no less than this value and no less than the volume of data for one mark.
-
-Let's look at an example. Assume that ‘index_granularity’ was set to 8192 during table creation.
-
-We are writing a UInt32-type column (4 bytes per value). When writing 8192 rows, the total will be 32 KB of data. Since min_compress_block_size = 65,536, a compressed block will be formed for every two marks.
-
-We are writing a URL column with the String type (average size of 60 bytes per value). When writing 8192 rows, the average will be slightly less than 500 KB of data. Since this is more than 65,536, a compressed block will be formed for each mark. In this case, when reading data from the disk in the range of a single mark, extra data won't be decompressed.
-
-There usually isn't any reason to change this setting.
-
-## max_query_size {#settings-max_query_size}
-
-The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
-The INSERT query also contains data for INSERT that is processed by a separate stream parser (that consumes O(1) RAM), which is not included in this restriction.
-
-Default value: 256 KiB.
-
-## interactive_delay {#interactive-delay}
-
-The interval in microseconds for checking whether request execution has been cancelled and sending the progress.
-
-Default value: 100,000 (checks for cancelling and sends the progress ten times per second).
-
-## connect_timeout, receive_timeout, send_timeout {#connect-timeout-receive-timeout-send-timeout}
-
-Timeouts in seconds on the socket used for communicating with the client.
-
-Default value: 10, 300, 300.
-
-## cancel_http_readonly_queries_on_client_close {#cancel-http-readonly-queries-on-client-close}
-
-Cancels HTTP read-only queries (e.g. SELECT) when a client closes the connection without waiting for the response.
-
-Default value: 0
-
-## poll_interval {#poll-interval}
-
-Lock in a wait loop for the specified number of seconds.
-
-Default value: 10.
-
-## max_distributed_connections {#max-distributed-connections}
-
-The maximum number of simultaneous connections with remote servers for distributed processing of a single query to a single Distributed table. We recommend setting a value no less than the number of servers in the cluster.
-
-Default value: 1024.
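A small sketch of relaxing the three socket timeouts described above for a high-latency client link; the values (in seconds) are illustrative:

``` sql
SET connect_timeout = 20, receive_timeout = 600, send_timeout = 600;
```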
-
-The following parameters are only used when creating Distributed tables (and when launching a server), so there is no reason to change them at runtime.
-
-## distributed_connections_pool_size {#distributed-connections-pool-size}
-
-The maximum number of simultaneous connections with remote servers for distributed processing of all queries to a single Distributed table. We recommend setting a value no less than the number of servers in the cluster.
-
-Default value: 1024.
-
-## connect_timeout_with_failover_ms {#connect-timeout-with-failover-ms}
-
-The timeout in milliseconds for connecting to a remote server for a Distributed table engine, if the ‘shard’ and ‘replica’ sections are used in the cluster definition.
-If unsuccessful, several attempts are made to connect to various replicas.
-
-Default value: 50.
-
-## connections_with_failover_max_tries {#connections-with-failover-max-tries}
-
-The maximum number of connection attempts with each replica for the Distributed table engine.
-
-Default value: 3.
-
-## extremes {#extremes}
-
-Whether to count extreme values (the minimums and maximums in columns of a query result). Accepts 0 or 1. By default, 0 (disabled).
-For more information, see the section “Extreme values”.
-
-## use_uncompressed_cache {#setting-use_uncompressed_cache}
-
-Whether to use a cache of uncompressed blocks. Accepts 0 or 1. By default, 0 (disabled).
-Using the uncompressed cache (only for tables in the MergeTree family) can significantly reduce latency and increase throughput when working with a large number of short queries. Enable this setting for users who send frequent short requests. Also pay attention to the [uncompressed_cache_size](../server-configuration-parameters/settings.md#server-settings-uncompressed_cache_size) configuration parameter (only set in the config file) – the size of uncompressed cache blocks. By default, it is 8 GiB. The uncompressed cache is filled in as needed and the least-used data is automatically deleted.
-
-For queries that read at least a somewhat large volume of data (one million rows or more), the uncompressed cache is disabled automatically to save space for truly small queries. This means that you can keep the ‘use_uncompressed_cache’ setting always set to 1.
-
-## replace_running_query {#replace-running-query}
-
-When using the HTTP interface, the ‘query_id’ parameter can be passed. This is any string that serves as the query identifier.
-If a query from the same user with the same ‘query_id’ already exists at this time, the behaviour depends on the ‘replace_running_query’ parameter.
-
-`0` (default) – Throw an exception (don't allow the query to run if a query with the same ‘query_id’ is already running).
-
-`1` – Cancel the old query and start running the new one.
-
-Yandex.Metrica uses this parameter set to 1 for implementing suggestions for segmentation conditions. After entering the next character, if the old query hasn't finished yet, it should be cancelled.
-
-## stream_flush_interval_ms {#stream-flush-interval-ms}
-
-Works for tables with streaming in the case of a timeout, or when a thread generates [max_insert_block_size](#settings-max_insert_block_size) rows.
-
-The default value is 7500.
-
-The smaller the value, the more often data is flushed into the table. Setting the value too low leads to poor performance.
-
-## load_balancing {#settings-load_balancing}
-
-Specifies the algorithm of replicas selection that is used for distributed query processing.
-
-ClickHouse supports the following algorithms of choosing replicas:
-
-- [Random](#load_balancing-random) (by default)
-- [Nearest hostname](#load_balancing-nearest_hostname)
-- [In order](#load_balancing-in_order)
-- [First or random](#load_balancing-first_or_random)
-
-### Random (by default) {#load_balancing-random}
-
-``` sql
-load_balancing = random
-```
-
-The number of errors is counted for each replica. The query is sent to the replica with the fewest errors, and if there are several of these, to any one of them.
-Disadvantages: Server proximity is not accounted for; if the replicas have different data, you will also get different data.
-
-### Nearest hostname {#load_balancing-nearest_hostname}
-
-``` sql
-load_balancing = nearest_hostname
-```
-
-The number of errors is counted for each replica. Every 5 minutes, the number of errors is integrally divided by 2. Thus, the number of errors is calculated for a recent time with exponential smoothing. If there is one replica with a minimal number of errors (i.e. errors occurred recently on the other replicas), the query is sent to it. If there are multiple replicas with the same minimal number of errors, the query is sent to the replica with a hostname that is most similar to the server's hostname in the config file (for the number of different characters in identical positions, up to the minimum length of both hostnames).
-
-For instance, example01-01-1 and example01-01-2.yandex.ru are different in one position, while example01-01-1 and example01-02-2 differ in two places.
-This method might seem primitive, but it doesn't require external data about network topology, and it doesn't compare IP addresses, which would be complicated for our IPv6 addresses.
-
-Thus, if there are equivalent replicas, the closest one by name is preferred.
-We can also assume that when sending a query to the same server, in the absence of failures, a distributed query will also go to the same servers. So even if different data is placed on the replicas, the query will return mostly the same results.
-
-### In order {#load_balancing-in_order}
-
-``` sql
-load_balancing = in_order
-```
-
-Replicas with the same number of errors are accessed in the same order as they are specified in the configuration.
-This method is appropriate when you know exactly which replica is preferable.
-
-### First or random {#load_balancing-first_or_random}
-
-``` sql
-load_balancing = first_or_random
-```
-
-This algorithm chooses the first replica in the set or a random replica if the first one is unavailable. It's effective in cross-replication topology setups, but useless in other configurations.
-
-The `first_or_random` algorithm solves the problem of the `in_order` algorithm. With `in_order`, if one replica goes down, the next one gets a double load while the remaining replicas handle the usual amount of traffic. When using the `first_or_random` algorithm, the load is evenly distributed among replicas that are still available.
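A minimal sketch of pinning the selection policy per session; `in_order` is shown because its effect is the easiest to verify against the cluster configuration, and `distributed_table` is an assumed Distributed table:

``` sql
-- Prefer replicas in the order they are listed in the cluster config;
-- consider first_or_random instead in cross-replication topologies.
SET load_balancing = 'in_order';
SELECT * FROM distributed_table LIMIT 10;
```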
-
-## prefer_localhost_replica {#settings-prefer-localhost-replica}
-
-Enables/disables preferable using of the localhost replica when processing distributed queries.
-
-Possible values:
-
-- 1 — ClickHouse always sends a query to the localhost replica if it exists.
-- 0 — ClickHouse uses the balancing strategy specified by the [load_balancing](#settings-load_balancing) setting.
-
-Default value: 1.
-
-!!! warning "Warning"
-    Disable this setting if you use [max_parallel_replicas](#settings-max_parallel_replicas).
-
-## totals_mode {#totals-mode}
-
-How to calculate TOTALS when HAVING is present, as well as when max_rows_to_group_by and group_by_overflow_mode = ‘any’ are present.
-See the section “WITH TOTALS modifier”.
-
-## totals_auto_threshold {#totals-auto-threshold}
-
-The threshold for `totals_mode = 'auto'`.
-See the section “WITH TOTALS modifier”.
-
-## max_parallel_replicas {#settings-max_parallel_replicas}
-
-The maximum number of replicas for each shard when executing a query.
-For consistency (to get different parts of the same data split), this option only works when the sampling key is set.
-Replica lag is not controlled.
-
-## compile {#compile}
-
-Enable compilation of queries. By default, 0 (disabled).
-
-The compilation is only used for part of the query-processing pipeline: for the first stage of aggregation (GROUP BY).
-If this portion of the pipeline was compiled, the query may run faster due to deployment of short cycles and inlining aggregate function calls. The maximum performance improvement (up to four times faster in rare cases) is seen for queries with multiple simple aggregate functions. Typically, the performance gain is insignificant. In very rare cases, it may slow down query execution.
-
-## min_count_to_compile {#min-count-to-compile}
-
-How many times to potentially use a compiled chunk of code before running compilation. By default, 3.
-For testing, the value can be set to 0: compilation runs synchronously and the query waits for the end of the compilation process before continuing execution. For all other cases, use values ​​starting with 1. Compilation normally takes about 5-10 seconds.
-If the value is 1 or more, compilation occurs asynchronously in a separate thread. The result will be used as soon as it is ready, including queries that are currently running.
-
-Compiled code is required for each different combination of aggregate functions used in the query and the type of keys in the GROUP BY clause.
-The results of the compilation are saved in the build directory in the form of .so files. There is no restriction on the number of compilation results since they don't use very much space. Old results will be used after server restarts, except in the case of a server upgrade – in this case, the old results are deleted.
-
-## output_format_json_quote_64bit_integers {#session_settings-output_format_json_quote_64bit_integers}
-
-If the value is true, integers appear in quotes when using JSON\* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
-
-## format_csv_delimiter {#settings-format_csv_delimiter}
-
-The character interpreted as a delimiter in the CSV data. By default, the delimiter is `,`.
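A short sketch of ingesting semicolon-delimited CSV, a common locale variant; the table `t(a UInt8, b String)` is an assumption:

``` sql
SET format_csv_delimiter = ';';

-- Inline data in clickhouse-client; each row now splits on ';'.
INSERT INTO t FORMAT CSV
1;"hello"
2;"world"
```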
-
-## input_format_csv_unquoted_null_literal_as_null {#settings-input_format_csv_unquoted_null_literal_as_null}
-
-For the CSV input format, enables or disables parsing of unquoted `NULL` as a literal (synonym for `\N`).
-
-## output_format_csv_crlf_end_of_line {#settings-output-format-csv-crlf-end-of-line}
-
-Use DOS/Windows-style line separator (CRLF) in CSV instead of Unix style (LF).
-
-## output_format_tsv_crlf_end_of_line {#settings-output-format-tsv-crlf-end-of-line}
-
-Use DOS/Windows-style line separator (CRLF) in TSV instead of Unix style (LF).
-
-## insert_quorum {#settings-insert_quorum}
-
-Enables quorum writes.
-
-- If `insert_quorum < 2`, the quorum writes are disabled.
-- If `insert_quorum >= 2`, the quorum writes are enabled.
-
-Default value: 0.
-
-Quorum writes
-
-`INSERT` succeeds only when ClickHouse manages to correctly write data to the `insert_quorum` of replicas during the `insert_quorum_timeout`. If for any reason the number of replicas with successful writes does not reach the `insert_quorum`, the write is considered failed and ClickHouse will delete the inserted block from all the replicas where data has already been written.
-
-All the replicas in the quorum are consistent, i.e., they contain data from all previous `INSERT` queries. The `INSERT` sequence is linearized.
-
-When reading the data written from `insert_quorum`, you can use the [select_sequential_consistency](#settings-select_sequential_consistency) option.
-
-ClickHouse generates an exception
-
-- If the number of available replicas at the time of the query is less than the `insert_quorum`.
-- At an attempt to write data when the previous block has not yet been inserted in the `insert_quorum` of replicas. This situation may occur if the user tries to perform an `INSERT` before the previous one with `insert_quorum` is completed.
-
-See also:
-
-- [insert_quorum_timeout](#settings-insert_quorum_timeout)
-- [select_sequential_consistency](#settings-select_sequential_consistency)
-
-## insert_quorum_timeout {#settings-insert_quorum_timeout}
-
-Write to quorum timeout in seconds. If the timeout has passed and no write has taken place yet, ClickHouse will generate an exception and the client must repeat the query to write the same block to the same or any other replica.
-
-Default value: 60 seconds.
-
-See also:
-
-- [insert_quorum](#settings-insert_quorum)
-- [select_sequential_consistency](#settings-select_sequential_consistency)
-
-## select_sequential_consistency {#settings-select_sequential_consistency}
-
-Enables or disables sequential consistency for `SELECT` queries:
-
-Possible values:
-
-- 0 — Disabled.
-- 1 — Enabled.
-
-Default value: 0.
-
-Usage
-
-When sequential consistency is enabled, ClickHouse allows the client to execute the `SELECT` query only for those replicas that contain data from all previous `INSERT` queries executed with `insert_quorum`. If the client refers to a partial replica, ClickHouse will generate an exception. The SELECT query will not include data that has not yet been written to the quorum of replicas.
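A hedged sketch of a quorum write followed by a sequentially consistent read; `replicated_table` is an assumed Replicated\* table and the timeout is illustrative:

``` sql
-- Require acknowledgement from 2 replicas within 30 seconds.
SET insert_quorum = 2, insert_quorum_timeout = 30;
INSERT INTO replicated_table VALUES (1, 'a');

-- Only read from replicas that have all quorum inserts.
SET select_sequential_consistency = 1;
SELECT count() FROM replicated_table;
```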
-
-See also:
-
-- [insert_quorum](#settings-insert_quorum)
-- [insert_quorum_timeout](#settings-insert_quorum_timeout)
-
-## insert_deduplicate {#settings-insert-deduplicate}
-
-Enables or disables block deduplication for `INSERT` (for Replicated\* tables).
-
-Possible values:
-
-- 0 — Disabled.
-- 1 — Enabled.
-
-Default value: 1.
-
-By default, blocks inserted into replicated tables by the `INSERT` statement are deduplicated (see [Data Replication](../../engines/table-engines/mergetree-family/replication.md)).
-
-## deduplicate_blocks_in_dependent_materialized_views {#settings-deduplicate-blocks-in-dependent-materialized-views}
-
-Enables or disables the deduplication check for materialized views that receive data from Replicated\* tables.
-
-Possible values:
-
-- 0 — Disabled.
-- 1 — Enabled.
-
-Default value: 0.
-
-Usage
-
-By default, deduplication is not performed for materialized views but is done upstream, in the source table.
-If an INSERTed block is skipped due to deduplication in the source table, there will be no insertion into the attached materialized views. This behaviour exists to enable the insertion of highly aggregated data into materialized views, for cases where inserted blocks are the same after materialized-view aggregation but derived from different INSERTs into the source table.
-At the same time, this behaviour “breaks” `INSERT` idempotency. If an `INSERT` into the main table was successful and an `INSERT` into a materialized view failed (e.g. because of communication failure with Zookeeper) a client will get an error and can retry the operation. However, the materialized view won't receive the second insert because it will be discarded by deduplication in the main (source) table. The setting `deduplicate_blocks_in_dependent_materialized_views` allows for changing this behaviour. On retry, a materialized view will receive the repeat insert and will perform the deduplication check by itself,
-ignoring the check result for the source table, and will insert rows lost because of the first failure.
-
-## max_network_bytes {#settings-max-network-bytes}
-
-Limits the data volume (in bytes) that is received or transmitted over the network when executing a query. This setting applies to every individual query.
-
-Possible values:
-
-- Positive integer.
-- 0 — Data volume control is disabled.
-
-Default value: 0.
-
-## max_network_bandwidth {#settings-max-network-bandwidth}
-
-Limits the speed of the data exchange over the network in bytes per second. This setting applies to every query.
-
-Possible values:
-
-- Positive integer.
-- 0 — Bandwidth control is disabled.
-
-Default value: 0.
-
-## max_network_bandwidth_for_user {#settings-max-network-bandwidth-for-user}
-
-Limits the speed of the data exchange over the network in bytes per second. This setting applies to all concurrently running queries performed by a single user.
-
-Possible values:
-
-- Positive integer.
-- 0 — Control of the data speed is disabled.
-
-Default value: 0.
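-
-For example (the numbers are illustrative only), a per-query cap and a per-user bandwidth cap can be set for a session:
-
-``` sql
-SET max_network_bytes = 100000000;             -- abort a query that moves more than ~100 MB
-SET max_network_bandwidth_for_user = 10485760; -- throttle this user's queries to 10 MiB/s
-```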
-
-## max_network_bandwidth_for_all_users {#settings-max-network-bandwidth-for-all-users}
-
-Limits the speed at which data is exchanged over the network, in bytes per second. This setting applies to all concurrently running queries on the server.
-
-Possible values:
-
-- Positive integer.
-- 0 — Control of the data speed is disabled.
-
-Default value: 0.
-
-## count_distinct_implementation {#settings-count_distinct_implementation}
-
-Specifies which of the `uniq*` functions should be used to perform the [COUNT(DISTINCT …)](../../sql-reference/aggregate-functions/reference.md#agg_function-count) construction.
-
-Possible values:
-
-- [uniq](../../sql-reference/aggregate-functions/reference.md#agg_function-uniq)
-- [uniqCombined](../../sql-reference/aggregate-functions/reference.md#agg_function-uniqcombined)
-- [uniqCombined64](../../sql-reference/aggregate-functions/reference.md#agg_function-uniqcombined64)
-- [uniqHLL12](../../sql-reference/aggregate-functions/reference.md#agg_function-uniqhll12)
-- [uniqExact](../../sql-reference/aggregate-functions/reference.md#agg_function-uniqexact)
-
-Default value: `uniqExact`.
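-
-A quick sketch of the effect:
-
-``` sql
-SET count_distinct_implementation = 'uniqCombined';
-SELECT COUNT(DISTINCT number % 10) FROM numbers(1000); -- rewritten to uniqCombined, returns 10
-```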
-
-## skip_unavailable_shards {#settings-skip_unavailable_shards}
-
-Enables or disables silent skipping of unavailable shards.
-
-A shard is considered unavailable if all its replicas are unavailable. A replica is unavailable in the following cases:
-
-- ClickHouse can't connect to the replica for any reason.
-
-    When connecting to a replica, ClickHouse performs several attempts. If all these attempts fail, the replica is considered unavailable.
-
-- The replica can't be resolved through DNS.
-
-    If the replica's hostname can't be resolved through DNS, it can indicate the following situations:
-
-    - The replica's host has no DNS record. It can occur in systems with dynamic DNS, for example, [Kubernetes](https://kubernetes.io), where nodes can be unresolvable during downtime, and this is not an error.
-
-    - Configuration error. The ClickHouse configuration file contains a wrong hostname.
-
-Possible values:
-
-- 1 — skipping enabled.
-
-    If a shard is unavailable, ClickHouse returns a result based on partial data and doesn't report node availability issues.
-
-- 0 — skipping disabled.
-
-    If a shard is unavailable, ClickHouse throws an exception.
-
-Default value: 0.
-
-## optimize_skip_unused_shards {#settings-optimize_skip_unused_shards}
-
-Enables or disables skipping of unused shards for SELECT queries that have a sharding-key condition in PREWHERE/WHERE (this assumes that the data is distributed by sharding key, otherwise it does nothing).
-
-Default value: 0
-
-## force_optimize_skip_unused_shards {#settings-force_optimize_skip_unused_shards}
-
-Enables or disables query execution if [`optimize_skip_unused_shards`](#settings-optimize_skip_unused_shards) is enabled but skipping of unused shards is not possible. If skipping is not possible and the setting is enabled, an exception is thrown.
-
-Possible values:
-
-- 0 — Disabled (does not throw)
-- 1 — Disable query execution only if the table has a sharding key
-- 2 — Disable query execution regardless of whether a sharding key is defined for the table
-
-Default value: 0
-
-## optimize_throw_if_noop {#setting-optimize_throw_if_noop}
-
-Enables or disables throwing an exception if an [OPTIMIZE](../../sql-reference/statements/misc.md#misc_operations-optimize) query didn't perform a merge.
-
-By default, `OPTIMIZE` returns successfully even if it didn't do anything. This setting lets you differentiate these situations and get the reason in an exception message.
-
-Possible values:
-
-- 1 — Throwing an exception is enabled.
-- 0 — Throwing an exception is disabled.
-
-Default value: 0.
-
-## distributed_replica_error_half_life {#settings-distributed_replica_error_half_life}
-
-- Type: seconds
-- Default value: 60 seconds
-
-Controls how fast errors in distributed tables zero out. If a replica is unavailable for some time, accumulates 5 errors, and distributed_replica_error_half_life is set to 1 second, then the replica is considered normal 3 seconds after the last error.
-
-See also:
-
-- [Distributed table engine](../../engines/table-engines/special/distributed.md)
-- [distributed_replica_error_cap](#settings-distributed_replica_error_cap)
-
-## distributed_replica_error_cap {#settings-distributed_replica_error_cap}
-
-- Type: unsigned int
-- Default value: 1000
-
-The error count of each replica is capped at this value, preventing a single replica from accumulating too many errors.
-
-See also:
-
-- [Distributed table engine](../../engines/table-engines/special/distributed.md)
-- [distributed_replica_error_half_life](#settings-distributed_replica_error_half_life)
-
-## distributed_directory_monitor_sleep_time_ms {#distributed_directory_monitor_sleep_time_ms}
-
-Base interval for the [Distributed](../../engines/table-engines/special/distributed.md) table engine to send data. The actual interval grows exponentially in the event of errors.
-
-Possible values:
-
-- A positive integer number of milliseconds.
-
-Default value: 100 milliseconds.
-
-## distributed_directory_monitor_max_sleep_time_ms {#distributed_directory_monitor_max_sleep_time_ms}
-
-Maximum interval for the [Distributed](../../engines/table-engines/special/distributed.md) table engine to send data. Limits the exponential growth of the interval set in the [distributed_directory_monitor_sleep_time_ms](#distributed_directory_monitor_sleep_time_ms) setting.
-
-Possible values:
-
-- A positive integer number of milliseconds.
-
-Default value: 30000 milliseconds (30 seconds).
-
-## distributed_directory_monitor_batch_inserts {#distributed_directory_monitor_batch_inserts}
-
-Enables/disables sending inserted data in batches.
-
-When batch sending is enabled, the [Distributed](../../engines/table-engines/special/distributed.md) table engine tries to send multiple files of inserted data in one operation instead of sending them separately. Batch sending improves cluster performance by better utilizing server and network resources.
-
-Possible values:
-
-- 1 — Enabled.
-- 0 — Disabled.
-
-Default value: 0.
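-
-As an illustration (the table name `dist` is hypothetical), batching can be enabled per session and any queued files flushed explicitly:
-
-``` sql
-SET distributed_directory_monitor_batch_inserts = 1;
-SYSTEM FLUSH DISTRIBUTED dist; -- force sending of data queued for the Distributed table
-```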
-
-## os_thread_priority {#setting-os-thread-priority}
-
-Sets the priority ([nice](https://en.wikipedia.org/wiki/Nice_(Unix))) for threads that execute queries. The OS scheduler considers this priority when choosing the next thread to run on each available CPU core.
-
-!!! warning "Warning"
-    To use this setting, you need to set the `CAP_SYS_NICE` capability. The `clickhouse-server` package sets it up during installation. Some virtual environments don't allow you to set the `CAP_SYS_NICE` capability. In this case, `clickhouse-server` shows a message about it at the start.
-
-Possible values:
-
-- You can set values in the range `[-20, 19]`.
-
-Lower values mean higher priority. Threads with low `nice` priority values are executed more frequently than threads with high values. High values are preferable for long-running non-interactive queries because it allows them to quickly give up resources in favour of short interactive queries when they arrive.
-
-Default value: 0.
-
-## query_profiler_real_time_period_ns {#query_profiler_real_time_period_ns}
-
-Sets the period for a real clock timer of the [query profiler](../../operations/optimizing-performance/sampling-query-profiler.md). The real clock timer counts wall-clock time.
-
-Possible values:
-
-- Positive integer number, in nanoseconds.
-
-    Recommended values:
-
-    - 10000000 (100 times a second) nanoseconds and less for single queries.
-    - 1000000000 (once a second) for cluster-wide profiling.
-
-- 0 for turning off the timer.
-
-Type: [UInt64](../../sql-reference/data-types/int-uint.md).
-
-Default value: 1000000000 nanoseconds (once a second).
-
-See also:
-
-- System table [trace_log](../../operations/system-tables.md#system_tables-trace_log)
-
-## query_profiler_cpu_time_period_ns {#query_profiler_cpu_time_period_ns}
-
-Sets the period for a CPU clock timer of the [query profiler](../../operations/optimizing-performance/sampling-query-profiler.md). This timer counts only CPU time.
-
-Possible values:
-
-- A positive integer number of nanoseconds.
-
-    Recommended values:
-
-    - 10000000 (100 times a second) nanoseconds and more for single queries.
-    - 1000000000 (once a second) for cluster-wide profiling.
-
-- 0 for turning off the timer.
-
-Type: [UInt64](../../sql-reference/data-types/int-uint.md).
-
-Default value: 1000000000 nanoseconds.
-
-See also:
-
-- System table [trace_log](../../operations/system-tables.md#system_tables-trace_log)
-
-## allow_introspection_functions {#settings-allow_introspection_functions}
-
-Enables or disables [introspection functions](../../sql-reference/functions/introspection.md) for query profiling.
-
-Possible values:
-
-- 1 — Introspection functions enabled.
-- 0 — Introspection functions disabled.
-
-Default value: 0.
-
-**See also**
-
-- [Sampling Query Profiler](../optimizing-performance/sampling-query-profiler.md)
-- System table [trace_log](../../operations/system-tables.md#system_tables-trace_log)
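-
-A sketch of putting the profiler and the introspection functions together (the sampling rate is illustrative):
-
-``` sql
-SET allow_introspection_functions = 1;
-SET query_profiler_real_time_period_ns = 10000000; -- sample wall-clock time 100 times a second
-SELECT count() FROM numbers(100000000);
--- inspect the collected samples:
-SELECT count() AS samples,
-       arrayStringConcat(arrayMap(x -> demangle(addressToSymbol(x)), trace), '\n') AS stack
-FROM system.trace_log
-GROUP BY trace
-ORDER BY samples DESC
-LIMIT 3;
-```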
-
-## input_format_parallel_parsing {#input-format-parallel-parsing}
-
-- Type: bool
-- Default value: True
-
-Enable order-preserving parallel parsing of data formats. Supported only for the TSV, TSKV, CSV and JSONEachRow formats.
-
-## min_chunk_bytes_for_parallel_parsing {#min-chunk-bytes-for-parallel-parsing}
-
-- Type: unsigned int
-- Default value: 1 MiB
-
-The minimum chunk size in bytes, which each thread will parse in parallel.
-
-## output_format_avro_codec {#settings-output_format_avro_codec}
-
-Sets the compression codec used for the output Avro file.
-
-Type: string
-
-Possible values:
-
-- `null` — No compression
-- `deflate` — Compress with Deflate (zlib)
-- `snappy` — Compress with [Snappy](https://google.github.io/snappy/)
-
-Default value: `snappy` (if available) or `deflate`.
-
-## output_format_avro_sync_interval {#settings-output_format_avro_sync_interval}
-
-Sets the minimum data size (in bytes) between synchronization markers for the output Avro file.
-
-Type: unsigned int
-
-Possible values: 32 (32 bytes) - 1073741824 (1 GiB)
-
-Default value: 32768 (32 KiB)
-
-## format_avro_schema_registry_url {#settings-format_avro_schema_registry_url}
-
-Sets the Confluent Schema Registry URL to use with the [AvroConfluent](../../interfaces/formats.md#data-format-avro-confluent) format.
-
-Type: URL
-
-Default value: Empty
-
-## background_pool_size {#background_pool_size}
-
-Sets the number of threads performing background operations in table engines (for example, merges in [MergeTree engine](../../engines/table-engines/mergetree-family/index.md) tables). This setting is applied at ClickHouse server start and can't be changed in a user session. By adjusting this setting, you manage CPU and disk load. A smaller pool size utilizes less CPU and disk resources, but background processes advance slower, which might eventually impact query performance.
-
-Possible values:
-
-- Any positive integer.
-
-Default value: 16.
-
-[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/)
diff --git a/docs/es/operations/system-tables.md b/docs/es/operations/system-tables.md
deleted file mode 100644
index 18e7f7227da..00000000000
--- a/docs/es/operations/system-tables.md
+++ /dev/null
@@ -1,1168 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 52
-toc_title: System tables
----
-
-# System tables {#system-tables}
-
-System tables are used for implementing part of the system's functionality, and for providing access to information about how the system is working.
-You can't delete a system table (but you can perform DETACH).
-System tables don't have files with data on the disk or files with metadata. The server creates all the system tables when it starts.
-System tables are read-only.
-They are located in the ‘system’ database.
-
-## system.asynchronous_metrics {#system_tables-asynchronous_metrics}
-
-Contains metrics that are calculated periodically in the background. For example, the amount of RAM in use.
-
-Columns:
-
-- `metric` ([String](../sql-reference/data-types/string.md)) — Metric name.
-- `value` ([Float64](../sql-reference/data-types/float.md)) — Metric value.
-
-**Example**
-
-``` sql
-SELECT * FROM system.asynchronous_metrics LIMIT 10
-```
-
-``` text
-┌─metric──────────────────────────────────┬──────value─┐
-│ jemalloc.background_thread.run_interval │          0 │
-│ jemalloc.background_thread.num_runs     │          0 │
-│ jemalloc.background_thread.num_threads  │          0 │
-│ jemalloc.retained                       │  422551552 │
-│ jemalloc.mapped                         │ 1682989056 │
-│ jemalloc.resident                       │ 1656446976 │
-│ jemalloc.metadata_thp                   │          0 │
-│ jemalloc.metadata                       │   10226856 │
-│ UncompressedCacheCells                  │          0 │
-│ MarkCacheFiles                          │          0 │
-└─────────────────────────────────────────┴────────────┘
-```
-
-**See also**
-
-- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring.
-- [system.metrics](#system_tables-metrics) — Contains instantly calculated metrics.
-- [system.events](#system_tables-events) — Contains a number of events that have occurred.
-- [system.metric_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
-
-## system.clusters {#system-clusters}
-
-Contains information about clusters available in the config file and the servers in them.
-
-Columns:
-
-- `cluster` (String) — The cluster name.
-- `shard_num` (UInt32) — The shard number in the cluster, starting from 1.
-- `shard_weight` (UInt32) — The relative weight of the shard when writing data.
-- `replica_num` (UInt32) — The replica number in the shard, starting from 1.
-- `host_name` (String) — The host name, as specified in the config.
-- `host_address` (String) — The host IP address obtained from DNS.
-- `port` (UInt16) — The port to use for connecting to the server.
-- `user` (String) — The name of the user for connecting to the server.
-- `errors_count` (UInt32) — The number of times this host failed to reach the replica.
-- `estimated_recovery_time` (UInt32) — Seconds left until the replica error count is zeroed and it is considered to be back to normal.
-
-Please note that `errors_count` is updated once per query to the cluster, but `estimated_recovery_time` is recalculated on demand. So there could be a case of non-zero `errors_count` and zero `estimated_recovery_time`; in that case the next query will zero `errors_count` and try to use the replica as if it had no errors.
-
-**See also**
-
-- [Distributed table engine](../engines/table-engines/special/distributed.md)
-- [distributed_replica_error_cap setting](settings/settings.md#settings-distributed_replica_error_cap)
-- [distributed_replica_error_half_life setting](settings/settings.md#settings-distributed_replica_error_half_life)
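-
-For example, to see the cluster topology at a glance:
-
-``` sql
-SELECT cluster, shard_num, replica_num, host_name FROM system.clusters;
-```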
-
-## system.columns {#system-columns}
-
-Contains information about columns in all the tables.
-
-You can use this table to get information similar to the [DESCRIBE TABLE](../sql-reference/statements/misc.md#misc-describe-table) query, but for multiple tables at once.
-
-The `system.columns` table contains the following columns (the column type is shown in brackets):
-
-- `database` (String) — Database name.
-- `table` (String) — Table name.
-- `name` (String) — Column name.
-- `type` (String) — Column type.
-- `default_kind` (String) — Expression type (`DEFAULT`, `MATERIALIZED`, `ALIAS`) for the default value, or an empty string if it is not defined.
-- `default_expression` (String) — Expression for the default value, or an empty string if it is not defined.
-- `data_compressed_bytes` (UInt64) — The size of compressed data, in bytes.
-- `data_uncompressed_bytes` (UInt64) — The size of decompressed data, in bytes.
-- `marks_bytes` (UInt64) — The size of marks, in bytes.
-- `comment` (String) — Comment on the column, or an empty string if it is not defined.
-- `is_in_partition_key` (UInt8) — Flag that indicates whether the column is in the partition expression.
-- `is_in_sorting_key` (UInt8) — Flag that indicates whether the column is in the sorting key expression.
-- `is_in_primary_key` (UInt8) — Flag that indicates whether the column is in the primary key expression.
-- `is_in_sampling_key` (UInt8) — Flag that indicates whether the column is in the sampling key expression.
-
-## system.contributors {#system-contributors}
-
-Contains information about contributors. All contributors in random order. The order is random at query execution time.
-
-Columns:
-
-- `name` (String) — Contributor (author) name from git log.
-
-**Example**
-
-``` sql
-SELECT * FROM system.contributors LIMIT 10
-```
-
-``` text
-┌─name─────────────┐
-│ Olga Khvostikova │
-│ Max Vetrov       │
-│ LiuYangkuan      │
-│ svladykin        │
-│ zamulla          │
-│ Šimon Podlipský  │
-│ BayoNet          │
-│ Ilya Khomutov    │
-│ Amy Krishnevsky  │
-│ Loud_Scream      │
-└──────────────────┘
-```
-
-To find yourself in the table, use a query:
-
-``` sql
-SELECT * FROM system.contributors WHERE name='Olga Khvostikova'
-```
-
-``` text
-┌─name─────────────┐
-│ Olga Khvostikova │
-└──────────────────┘
-```
-
-## system.databases {#system-databases}
-
-This table contains a single String column called ‘name’ – the name of a database.
-Each database that the server knows about has a corresponding entry in the table.
-This system table is used for implementing the `SHOW DATABASES` query.
-
-## system.detached_parts {#system_tables-detached_parts}
-
-Contains information about detached parts of [MergeTree](../engines/table-engines/mergetree-family/mergetree.md) tables. The `reason` column specifies why the part was detached. For user-detached parts, the reason is empty. Such parts can be attached with the [ALTER TABLE ATTACH PARTITION\|PART](../sql-reference/statements/alter.md#alter_attach-partition) command. For the description of other columns, see [system.parts](#system_tables-parts). If the part name is invalid, values of some columns may be `NULL`. Such parts can be deleted with [ALTER TABLE DROP DETACHED PART](../sql-reference/statements/alter.md#alter_drop-detached).
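-
-For example, stray detached parts can be reviewed before deciding whether to re-attach or drop them:
-
-``` sql
-SELECT * FROM system.detached_parts;
-```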
-
-## system.dictionaries {#system_tables-dictionaries}
-
-Contains information about [external dictionaries](../sql-reference/dictionaries/external-dictionaries/external-dicts.md).
-
-Columns:
-
-- `database` ([String](../sql-reference/data-types/string.md)) — Name of the database containing the dictionary created by DDL query. Empty string for other dictionaries.
-- `name` ([String](../sql-reference/data-types/string.md)) — [Dictionary name](../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md).
-- `status` ([Enum8](../sql-reference/data-types/enum.md)) — Dictionary status. Possible values:
-    - `NOT_LOADED` — Dictionary was not loaded because it was not used.
-    - `LOADED` — Dictionary loaded successfully.
-    - `FAILED` — Unable to load the dictionary as a result of an error.
-    - `LOADING` — Dictionary is loading now.
-    - `LOADED_AND_RELOADING` — Dictionary is loaded successfully, and is being reloaded right now (frequent reasons: [SYSTEM RELOAD DICTIONARY](../sql-reference/statements/system.md#query_language-system-reload-dictionary) query, timeout, dictionary config has changed).
-    - `FAILED_AND_RELOADING` — Could not load the dictionary as a result of an error and is loading now.
-- `origin` ([String](../sql-reference/data-types/string.md)) — Path to the configuration file that describes the dictionary.
-- `type` ([String](../sql-reference/data-types/string.md)) — Type of a dictionary allocation. [Storing Dictionaries in Memory](../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md).
-- `key` — [Key type](../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key): Numeric Key ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) or Composite key ([String](../sql-reference/data-types/string.md)) — form “(type 1, type 2, …, type n)”.
-- `attribute.names` ([Array](../sql-reference/data-types/array.md)([String](../sql-reference/data-types/string.md))) — Array of [attribute names](../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) provided by the dictionary.
-- `attribute.types` ([Array](../sql-reference/data-types/array.md)([String](../sql-reference/data-types/string.md))) — Corresponding array of [attribute types](../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) that are provided by the dictionary.
-- `bytes_allocated` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Amount of RAM allocated for the dictionary.
-- `query_count` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of queries since the dictionary was loaded or since the last successful reboot.
-- `hit_rate` ([Float64](../sql-reference/data-types/float.md)) — For cache dictionaries, the percentage of uses for which the value was in the cache.
-- `element_count` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of items stored in the dictionary.
-- `load_factor` ([Float64](../sql-reference/data-types/float.md)) — Percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table).
-- `source` ([String](../sql-reference/data-types/string.md)) — Text describing the [data source](../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) for the dictionary.
-- `lifetime_min` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Minimum [lifetime](../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds.
-- `lifetime_max` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Maximum [lifetime](../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds.
-- `loading_start_time` ([DateTime](../sql-reference/data-types/datetime.md)) — Start time for loading the dictionary.
-- `last_successful_update_time` ([DateTime](../sql-reference/data-types/datetime.md)) — End time for loading or updating the dictionary. Helps to monitor some troubles with external sources and investigate causes.
-- `loading_duration` ([Float32](../sql-reference/data-types/float.md)) — Duration of a dictionary loading.
-- `last_exception` ([String](../sql-reference/data-types/string.md)) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn't be created.
-
-**Example**
-
-Configure the dictionary.
-
-``` sql
-CREATE DICTIONARY dictdb.dict
-(
-    `key` Int64 DEFAULT -1,
-    `value_default` String DEFAULT 'world',
-    `value_expression` String DEFAULT 'xxx' EXPRESSION 'toString(127 * 172)'
-)
-PRIMARY KEY key
-SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dicttbl' DB 'dictdb'))
-LIFETIME(MIN 0 MAX 1)
-LAYOUT(FLAT())
-```
-
-Make sure that the dictionary is loaded.
-
-``` sql
-SELECT * FROM system.dictionaries
-```
-
-``` text
-┌─database─┬─name─┬─status─┬─origin──────┬─type─┬─key────┬─attribute.names──────────────────────┬─attribute.types─────┬─bytes_allocated─┬─query_count─┬─hit_rate─┬─element_count─┬───────────load_factor─┬─source─────────────────────┬─lifetime_min─┬─lifetime_max─┬──loading_start_time─┬──last_successful_update_time─┬──────loading_duration─┬─last_exception─┐
-│ dictdb   │ dict │ LOADED │ dictdb.dict │ Flat │ UInt64 │ ['value_default','value_expression'] │ ['String','String'] │           74032 │           0 │        1 │             1 │ 0.0004887585532746823 │ ClickHouse: dictdb.dicttbl │            0 │            1 │ 2020-03-04 04:17:34 │          2020-03-04 04:30:34 │                 0.002 │                │
-└──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┴───────────────────────┴────────────────┘
-```
-
-## system.events {#system_tables-events}
-
-Contains information about the number of events that have occurred in the system. For example, in the table you can find how many `SELECT` queries were processed since the ClickHouse server started.
-
-Columns:
-
-- `event` ([String](../sql-reference/data-types/string.md)) — Event name.
-- `value` ([UInt64](../sql-reference/data-types/int-uint.md)) — Number of events occurred.
-- `description` ([String](../sql-reference/data-types/string.md)) — Event description.
-
-**Example**
-
-``` sql
-SELECT * FROM system.events LIMIT 5
-```
-
-``` text
-┌─event─────────────────────────────────┬─value─┬─description────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
-│ Query                                 │    12 │ Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries. │
-│ SelectQuery                           │     8 │ Same as Query, but only for SELECT queries.                                                                                                                                                                                                                    │
-│ FileOpen                              │    73 │ Number of files opened.                                                                                                                                                                                                                                        │
-│ ReadBufferFromFileDescriptorRead      │   155 │ Number of reads (read/pread) from a file descriptor. Does not include sockets.                                                                                                                                                                                 │
-│ ReadBufferFromFileDescriptorReadBytes │  9931 │ Number of bytes read from file descriptors. If the file is compressed, this will show the compressed data size.                                                                                                                                                │
-└───────────────────────────────────────┴───────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
-```
-
-**See also**
-
-- [system.asynchronous_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
-- [system.metrics](#system_tables-metrics) — Contains instantly calculated metrics.
-- [system.metric_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
-- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring.
-
-## system.functions {#system-functions}
-
-Contains information about normal and aggregate functions.
-
-Columns:
-
-- `name`(`String`) – The name of the function.
-- `is_aggregate`(`UInt8`) — Whether the function is aggregate.
-
-## system.graphite_retentions {#system-graphite-retentions}
-
-Contains information about parameters [graphite_rollup](server-configuration-parameters/settings.md#server_configuration_parameters-graphite) which are used in tables with [\*GraphiteMergeTree](../engines/table-engines/mergetree-family/graphitemergetree.md) engines.
-
-Columns:
-
-- `config_name` (String) - `graphite_rollup` parameter name.
-- `regexp` (String) - A pattern for the metric name.
-- `function` (String) - The name of the aggregating function.
-- `age` (UInt64) - The minimum age of the data in seconds.
-- `precision` (UInt64) - How precisely to define the age of the data in seconds.
-- `priority` (UInt16) - Pattern priority.
-- `is_default` (UInt8) - Whether the pattern is the default.
-- `Tables.database` (Array(String)) - Array of names of database tables that use the `config_name` parameter.
-- `Tables.table` (Array(String)) - Array of table names that use the `config_name` parameter.
-
-## system.merges {#system-merges}
-
-Contains information about merges and part mutations currently in process for tables in the MergeTree family.
-
-Columns:
-
-- `database` (String) — The name of the database the table is in.
-- `table` (String) — Table name.
-- `elapsed` (Float64) — The time elapsed (in seconds) since the merge started.
-- `progress` (Float64) — The percentage of completed work from 0 to 1.
-- `num_parts` (UInt64) — The number of pieces to be merged.
-- `result_part_name` (String) — The name of the part that will be formed as the result of merging.
-- `is_mutation` (UInt8) - 1 if this process is a part mutation.
-- `total_size_bytes_compressed` (UInt64) — The total size of the compressed data in the merged chunks.
-- `total_size_marks` (UInt64) — The total number of marks in the merged parts.
-- `bytes_read_uncompressed` (UInt64) — Number of bytes read, uncompressed.
-- `rows_read` (UInt64) — Number of rows read.
-- `bytes_written_uncompressed` (UInt64) — Number of bytes written, uncompressed.
-- `rows_written` (UInt64) — Number of rows written.
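-
-For example, currently running merges can be watched with a query such as:
-
-``` sql
-SELECT database, table, round(progress * 100, 1) AS pct, elapsed FROM system.merges;
-```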
-
-## system.metrics {#system_tables-metrics}
-
-Contains metrics which can be calculated instantly, or have a current value. For example, the number of simultaneously processed queries or the current replica delay. This table is always up to date.
-
-Columns:
-
-- `metric` ([String](../sql-reference/data-types/string.md)) — Metric name.
-- `value` ([Int64](../sql-reference/data-types/int-uint.md)) — Metric value.
-- `description` ([String](../sql-reference/data-types/string.md)) — Metric description.
-
-The list of supported metrics can be found in the [src/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/src/Common/CurrentMetrics.cpp) source file of ClickHouse.
-
-**Example**
-
-``` sql
-SELECT * FROM system.metrics LIMIT 10
-```
-
-``` text
-┌─metric─────────────────────┬─value─┬─description──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
-│ Query                      │     1 │ Number of executing queries                                                                                                                                                                      │
-│ Merge                      │     0 │ Number of executing background merges                                                                                                                                                            │
-│ PartMutation               │     0 │ Number of mutations (ALTER DELETE/UPDATE)                                                                                                                                                        │
-│ ReplicatedFetch            │     0 │ Number of data parts being fetched from replicas                                                                                                                                                 │
-│ ReplicatedSend             │     0 │ Number of data parts being sent to replicas                                                                                                                                                      │
-│ ReplicatedChecks           │     0 │ Number of data parts checking for consistency                                                                                                                                                    │
-│ BackgroundPoolTask         │     0 │ Number of active tasks in BackgroundProcessingPool (merges, mutations, fetches, or replication queue bookkeeping)                                                                                │
-│ BackgroundSchedulePoolTask │     0 │ Number of active tasks in BackgroundSchedulePool. This pool is used for periodic ReplicatedMergeTree tasks, like cleaning old data parts, altering data parts, replica re-initialization, etc.   │
-│ DiskSpaceReservedForMerge  │     0 │ Disk space reserved for currently running background merges. It is slightly more than the total size of currently merging parts.                                                                 │
-│ DistributedSend            │     0 │ Number of connections to remote servers sending data that was INSERTed into Distributed tables. Both synchronous and asynchronous mode.                                                          │
-└────────────────────────────┴───────┴──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
-```
-
-**See also**
-
-- [system.asynchronous_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
-- [system.events](#system_tables-events) — Contains a number of events that occurred.
-- [system.metric_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
-- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring.
-
-## system.metric_log {#system_tables-metric_log}
-
-Contains a history of metrics values from the tables `system.metrics` and `system.events`, periodically flushed to disk.
-To turn on metrics history collection in `system.metric_log`, create `/etc/clickhouse-server/config.d/metric_log.xml` with the following content:
-
-``` xml
-<yandex>
-    <metric_log>
-        <database>system</database>
-        <table>metric_log</table>
-        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
-        <collect_interval_milliseconds>1000</collect_interval_milliseconds>
-    </metric_log>
-</yandex>
-```
-
-**Example**
-
-``` sql
-SELECT * FROM system.metric_log LIMIT 1 FORMAT Vertical;
-```
-
-``` text
-Row 1:
-──────
-event_date: 2020-02-18
-event_time: 2020-02-18 07:15:33
-milliseconds: 554
-ProfileEvent_Query: 0
-ProfileEvent_SelectQuery: 0
-ProfileEvent_InsertQuery: 0
-ProfileEvent_FileOpen: 0
-ProfileEvent_Seek: 0
-ProfileEvent_ReadBufferFromFileDescriptorRead: 1
-ProfileEvent_ReadBufferFromFileDescriptorReadFailed: 0
-ProfileEvent_ReadBufferFromFileDescriptorReadBytes: 0
-ProfileEvent_WriteBufferFromFileDescriptorWrite: 1
-ProfileEvent_WriteBufferFromFileDescriptorWriteFailed: 0
-ProfileEvent_WriteBufferFromFileDescriptorWriteBytes: 56
-...
-CurrentMetric_Query: 0
-CurrentMetric_Merge: 0
-CurrentMetric_PartMutation: 0
-CurrentMetric_ReplicatedFetch: 0
-CurrentMetric_ReplicatedSend: 0
-CurrentMetric_ReplicatedChecks: 0
-...
-```
-
-**See also**
-
-- [system.asynchronous_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
-- [system.events](#system_tables-events) — Contains a number of events that occurred.
-- [system.metrics](#system_tables-metrics) — Contains instantly calculated metrics.
-- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring.
-
-## system.numbers {#system-numbers}
-
-This table contains a single UInt64 column named ‘number’ that contains almost all the natural numbers starting from zero.
-You can use this table for tests, or if you need to do a brute force search.
-Reads from this table are not parallelized.
-
-## system.numbers_mt {#system-numbers-mt}
-
-The same as ‘system.numbers’ but reads are parallelized. The numbers can be returned in any order.
-Used for tests.
-
-## system.one {#system-one}
-
-This table contains a single row with a single ‘dummy’ UInt8 column containing the value 0.
-This table is used if a SELECT query doesn't specify the FROM clause.
-This is similar to the DUAL table found in other DBMSs.
-
-## system.parts {#system_tables-parts}
-
-Contains information about parts of [MergeTree](../engines/table-engines/mergetree-family/mergetree.md) tables.
-
-Each row describes one data part.
-
-Columns:
-
-- `partition` (String) – The partition name. To learn what a partition is, see the description of the [ALTER](../sql-reference/statements/alter.md#query_language_queries_alter) query.
-
-    Formats:
-
-    - `YYYYMM` for automatic partitioning by month.
-    - `any_string` when partitioning manually.
-
-- `name` (`String`) – Name of the data part.
-
-- `active` (`UInt8`) – Flag that indicates whether the data part is active. If a data part is active, it's used in a table. Otherwise, it's deleted. Inactive data parts remain after merging.
-
-- `marks` (`UInt64`) – The number of marks. To get the approximate number of rows in a data part, multiply `marks` by the index granularity (usually 8192) (this hint doesn't work for adaptive granularity).
-
-- `rows` (`UInt64`) – The number of rows.
-
-- `bytes_on_disk` (`UInt64`) – Total size of all the data part files in bytes.
-
-- `data_compressed_bytes` (`UInt64`) – Total size of compressed data in the data part. All the auxiliary files (for example, files with marks) are not included.
-
-- `data_uncompressed_bytes` (`UInt64`) – Total size of uncompressed data in the data part. All the auxiliary files (for example, files with marks) are not included.
-
-- `marks_bytes` (`UInt64`) – The size of the file with marks.
-
-- `modification_time` (`DateTime`) – The time the directory with the data part was modified. This usually corresponds to the time of data part creation.
-
-- `remove_time` (`DateTime`) – The time when the data part became inactive.
-
-- `refcount` (`UInt32`) – The number of places where the data part is used. A value greater than 2 indicates that the data part is used in queries or merges.
-
-- `min_date` (`Date`) – The minimum value of the date key in the data part.
-
-- `max_date` (`Date`) – The maximum value of the date key in the data part.
-
-- `min_time` (`DateTime`) – The minimum value of the date and time key in the data part.
-
-- `max_time`(`DateTime`) – The maximum value of the date and time key in the data part.
-
-- `partition_id` (`String`) – ID of the partition.
-
-- `min_block_number` (`UInt64`) – The minimum number of data parts that make up the current part after merging.
-
-- `max_block_number` (`UInt64`) – The maximum number of data parts that make up the current part after merging.
-
-- `level` (`UInt32`) – Depth of the merge tree. Zero means that the current part was created by insert rather than by merging other parts.
-
-- `data_version` (`UInt64`) – Number that is used to determine which mutations should be applied to the data part (mutations with a version higher than `data_version`).
-
-- `primary_key_bytes_in_memory` (`UInt64`) – The amount of memory (in bytes) used by primary key values.
-
-- `primary_key_bytes_in_memory_allocated` (`UInt64`) – The amount of memory (in bytes) reserved for primary key values.
-
-- `is_frozen` (`UInt8`) – Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup doesn't exist. For more details, see [FREEZE PARTITION](../sql-reference/statements/alter.md#alter_freeze-partition)
-
-- `database` (`String`) – Name of the database.
-
-- `table` (`String`) – Name of the table.
-
-- `engine` (`String`) – Name of the table engine without parameters.
-
-- `path` (`String`) – Absolute path to the folder with data part files.
-
-- `disk` (`String`) – Name of a disk that stores the data part.
-
-- `hash_of_all_files` (`String`) – [sipHash128](../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of compressed files.
-
-- `hash_of_uncompressed_files` (`String`) – [sipHash128](../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of uncompressed files (files with marks, index file etc.).
-
-- `uncompressed_hash_of_compressed_files` (`String`) – [sipHash128](../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of data in the compressed files as if they were uncompressed.
-
-- `bytes` (`UInt64`) – Alias for `bytes_on_disk`.
-
-- `marks_size` (`UInt64`) – Alias for `marks_bytes`.
-
-## system.part_log {#system_tables-part-log}
-
-The `system.part_log` table is created only if the [part_log](server-configuration-parameters/settings.md#server_configuration_parameters-part-log) server setting is specified.
-
-This table contains information about events that occurred with [data parts](../engines/table-engines/mergetree-family/custom-partitioning-key.md) in the [MergeTree](../engines/table-engines/mergetree-family/mergetree.md) family tables, such as adding or merging data.
-
-The `system.part_log` table contains the following columns:
-
-- `event_type` (Enum) — Type of the event that occurred with the data part. Can have one of the following values:
-    - `NEW_PART` — Inserting of a new data part.
-    - `MERGE_PARTS` — Merging of data parts.
-    - `DOWNLOAD_PART` — Downloading a data part.
-    - `REMOVE_PART` — Removing or detaching a data part using [DETACH PARTITION](../sql-reference/statements/alter.md#alter_detach-partition).
-    - `MUTATE_PART` — Mutating of a data part.
-    - `MOVE_PART` — Moving the data part from the one disk to another one.
-- `event_date` (Date) — Event date.
-- `event_time` (DateTime) — Event time.
-- `duration_ms` (UInt64) — Duration.
-- `database` (String) — Name of the database the data part is in.
-- `table` (String) — Name of the table the data part is in.
-- `part_name` (String) — Name of the data part.
-- `partition_id` (String) — ID of the partition that the data part was inserted to. The column takes the ‘all’ value if the partitioning is by `tuple()`.
-- `rows` (UInt64) — The number of rows in the data part.
-- `size_in_bytes` (UInt64) — Size of the data part in bytes.
-- `merged_from` (Array(String)) — An array of names of the parts which the current part was made up from (after the merge).
-- `bytes_uncompressed` (UInt64) — Size of uncompressed bytes.
-- `read_rows` (UInt64) — The number of rows that was read during the merge.
-- `read_bytes` (UInt64) — The number of bytes that was read during the merge.
-- `error` (UInt16) — The code number of the occurred error.
-- `exception` (String) — Text message of the occurred error.
-
-The `system.part_log` table is created after the first inserting of data into the `MergeTree` table.
-
-## system.processes {#system_tables-processes}
-
-This system table is used for implementing the `SHOW PROCESSLIST` query.
-
-Columns:
-
-- `user` (String) – The user who made the query. Keep in mind that for distributed processing, queries are sent to remote servers under the `default` user. The field contains the username for a specific query, not for a query that this query initiated.
-- `address` (String) – The IP address the request was made from. The same for distributed processing. To track where a distributed query was originally made from, look at `system.processes` on the query requestor server.
-- `elapsed` (Float64) – The time in seconds since request execution started.
-- `rows_read` (UInt64) – The number of rows read from the table. For distributed processing, on the requestor server, this is the total for all remote servers.
-- `bytes_read` (UInt64) – The number of uncompressed bytes read from the table. For distributed processing, on the requestor server, this is the total for all remote servers.
-- `total_rows_approx` (UInt64) – The approximation of the total number of rows that should be read. For distributed processing, on the requestor server, this is the total for all remote servers. It can be updated during request processing, when new sources to process become known.
-- `memory_usage` (UInt64) – Amount of RAM the request uses. It might not include some types of dedicated memory. See the [max_memory_usage](../operations/settings/query-complexity.md#settings_max_memory_usage) setting.
-- `query` (String) – The query text. For `INSERT`, it doesn't include the data to insert.
-- `query_id` (String) – Query ID, if defined.
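-
-For example, long-running queries can be spotted (and, if needed, killed by `query_id`) with:
-
-``` sql
-SELECT query_id, user, elapsed, query FROM system.processes ORDER BY elapsed DESC;
-```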
-
-## system.text_log {#system_tables-text_log}
-
-Contains logging entries. The logging level which goes to this table can be limited with the `text_log.level` server setting.
-
-Columns:
-
-- `event_date` (`Date`) - Date of the entry.
-- `event_time` (`DateTime`) - Time of the entry.
-- `microseconds` (`UInt32`) - Microseconds of the entry.
-- `thread_name` (String) — Name of the thread from which the logging was done.
-- `thread_id` (UInt64) — OS thread ID.
-- `level` (`Enum8`) - Entry level.
-    - `'Fatal' = 1`
-    - `'Critical' = 2`
-    - `'Error' = 3`
-    - `'Warning' = 4`
-    - `'Notice' = 5`
-    - `'Information' = 6`
-    - `'Debug' = 7`
-    - `'Trace' = 8`
-- `query_id` (`String`) - ID of the query.
-- `logger_name` (`LowCardinality(String)`) - Name of the logger (i.e. `DDLWorker`)
-- `message` (`String`) - The message itself.
-- `revision` (`UInt32`) - ClickHouse revision.
-- `source_file` (`LowCardinality(String)`) - Source file from which the logging was done.
-- `source_line` (`UInt64`) - Source line from which the logging was done.
-
-## system.query_log {#system_tables-query_log}
-
-Contains information about the execution of queries. For each query, you can see the processing start time, duration of processing, error messages and other information.
-
-!!! note "Note"
-    The table doesn't contain input data for `INSERT` queries.
-
-ClickHouse creates this table only if the [query_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in.
-
-To enable query logging, set the [log_queries](settings/settings.md#settings-log-queries) parameter to 1. For details, see the [Settings](settings/settings.md) section.
-
-The `system.query_log` table registers two kinds of queries:
-
-1.  Initial queries that were run directly by the client.
-2.  Child queries that were initiated by other queries (for distributed query execution). For these kinds of queries, information about the parent queries is shown in the `initial_*` columns.
-
-Columns:
-
-- `type` (`Enum8`) — Type of event that occurred when executing the query. Values:
-    - `'QueryStart' = 1` — Successful start of query execution.
-    - `'QueryFinish' = 2` — Successful end of query execution.
-    - `'ExceptionBeforeStart' = 3` — Exception before the start of query execution.
-    - `'ExceptionWhileProcessing' = 4` — Exception during the query execution.
-- `event_date` (Date) — Query starting date.
-- `event_time` (DateTime) — Query starting time.
-- `query_start_time` (DateTime) — Start time of query execution.
-- `query_duration_ms` (UInt64) — Duration of query execution.
-- `read_rows` (UInt64) — Number of read rows.
-- `read_bytes` (UInt64) — Number of read bytes.
-- `written_rows` (UInt64) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0.
-- `written_bytes` (UInt64) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0.
-- `result_rows` (UInt64) — Number of rows in the result.
-- `result_bytes` (UInt64) — Number of bytes in the result.
-- `memory_usage` (UInt64) — Memory consumption by the query.
-- `query` (String) — Query string.
-- `exception` (String) — Exception message.
-- `stack_trace` (String) — Stack trace (a list of methods called before the error occurred). An empty string, if the query is completed successfully.
-- `is_initial_query` (UInt8) — Query type. Possible values:
-    - 1 — Query was initiated by the client.
-    - 0 — Query was initiated by another query for distributed query execution.
-- `user` (String) — Name of the user who initiated the current query.
-- `query_id` (String) — ID of the query.
-- `address` (IPv6) — IP address that was used to make the query.
-- `port` (UInt16) — The client port that was used to make the query.
-- `initial_user` (String) — Name of the user who ran the initial query (for distributed query execution).
-- `initial_query_id` (String) — ID of the initial query (for distributed query execution).
-- `initial_address` (IPv6) — IP address that the parent query was launched from.
-- `initial_port` (UInt16) — The client port that was used to make the parent query.
-- `interface` (UInt8) — Interface that the query was initiated from. Possible values:
-    - 1 — TCP.
-    - 2 — HTTP.
-- `os_user` (String) — OS's username who runs [clickhouse-client](../interfaces/cli.md).
-- `client_hostname` (String) — Hostname of the client machine where the [clickhouse-client](../interfaces/cli.md) or another TCP client is run.
-- `client_name` (String) — The [clickhouse-client](../interfaces/cli.md) or another TCP client name.
-- `client_revision` (UInt32) — Revision of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
-- `client_version_major` (UInt32) — Major version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
-- `client_version_minor` (UInt32) — Minor version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
-- `client_version_patch` (UInt32) — Patch component of the [clickhouse-client](../interfaces/cli.md) or another TCP client version.
-- `http_method` (UInt8) — HTTP method that initiated the query. Possible values:
-    - 0 — The query was launched from the TCP interface.
-    - 1 — `GET` method was used.
-    - 2 — `POST` method was used.
-- `http_user_agent` (String) — The `UserAgent` header passed in the HTTP request.
-- `quota_key` (String) — The “quota key” specified in the [quotas](quotas.md) setting (see `keyed`).
-- `revision` (UInt32) — ClickHouse revision.
-- `thread_numbers` (Array(UInt32)) — Number of threads that are participating in query execution.
-- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics. The description of them could be found in the table [system.events](#system_tables-events)
-- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics that are listed in the `ProfileEvents.Names` column.
-- `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
-- `Settings.Values` (Array(String)) — Values of settings that are listed in the `Settings.Names` column.
-
-Each query creates one or two rows in the `query_log` table, depending on the status of the query:
-
-1.  If the query execution is successful, two events with types 1 and 2 are created (see the `type` column).
-2.  If an error occurred during query processing, two events with types 1 and 4 are created.
-3.  If an error occurred before launching the query, a single event with type 3 is created.
-
-By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.
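-
-For example (a sketch; recent entries only appear after a flush):
-
-``` sql
-SYSTEM FLUSH LOGS;
-SELECT query, query_duration_ms
-FROM system.query_log
-WHERE type = 'QueryFinish'
-ORDER BY event_time DESC
-LIMIT 5;
-```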
-
-When the table is deleted manually, it will be automatically created on the fly. Note that all the previous logs will be deleted.
-
-!!! note "Note"
-    The storage period for logs is unlimited. Logs aren't automatically deleted from the table. You need to organize the removal of outdated logs yourself.
-
-You can specify an arbitrary partitioning key for the `system.query_log` table in the [query_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server setting (see the `partition_by` parameter).
-
-## system.query_thread_log {#system_tables-query-thread-log}
-
-The table contains information about each query execution thread.
-
-ClickHouse creates this table only if the [query_thread_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) server parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in.
-
-To enable query logging, set the [log_query_threads](settings/settings.md#settings-log-query-threads) parameter to 1. For details, see the [Settings](settings/settings.md) section.
-
-Columns:
-
-- `event_date` (Date) — the date when the thread has finished execution of the query.
-- `event_time` (DateTime) — the date and time when the thread has finished execution of the query.
-- `query_start_time` (DateTime) — Start time of query execution.
-- `query_duration_ms` (UInt64) — Duration of query execution.
-- `read_rows` (UInt64) — Number of read rows.
-- `read_bytes` (UInt64) — Number of read bytes.
-- `written_rows` (UInt64) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0.
-- `written_bytes` (UInt64) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0.
-- `memory_usage` (Int64) — The difference between the amount of allocated and freed memory in the context of this thread.
-- `peak_memory_usage` (Int64) — The maximum difference between the amount of allocated and freed memory in the context of this thread.
-- `thread_name` (String) — Name of the thread.
-- `thread_number` (UInt32) — Internal thread ID.
-- `os_thread_id` (Int32) — OS thread ID.
-- `master_thread_id` (UInt64) — OS initial ID of initial thread.
-- `query` (String) — Query string.
-- `is_initial_query` (UInt8) — Query type. Possible values:
-    - 1 — Query was initiated by the client.
-    - 0 — Query was initiated by another query for distributed query execution.
-- `user` (String) — Name of the user who initiated the current query.
-- `query_id` (String) — ID of the query.
-- `address` (IPv6) — IP address that was used to make the query.
-- `port` (UInt16) — The client port that was used to make the query.
-- `initial_user` (String) — Name of the user who ran the initial query (for distributed query execution).
-- `initial_query_id` (String) — ID of the initial query (for distributed query execution).
-- `initial_address` (IPv6) — IP address that the parent query was launched from.
-- `initial_port` (UInt16) — The client port that was used to make the parent query.
-- `interface` (UInt8) — Interface that the query was initiated from. Possible values:
-    - 1 — TCP.
-    - 2 — HTTP.
-- `os_user` (String) — OS username of the user who runs [clickhouse-client](../interfaces/cli.md).
-- `client_hostname` (String) — Hostname of the client machine where [clickhouse-client](../interfaces/cli.md) or another TCP client is run.
-- `client_name` (String) — The [clickhouse-client](../interfaces/cli.md) or another TCP client name.
-- `client_revision` (UInt32) — Revision of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
-- `client_version_major` (UInt32) — Major version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
-- `client_version_minor` (UInt32) — Minor version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
-- `client_version_patch` (UInt32) — Patch component of the [clickhouse-client](../interfaces/cli.md) or another TCP client version.
-- `http_method` (UInt8) — HTTP method that initiated the query. Possible values:
-    - 0 — The query was launched from the TCP interface.
-    - 1 — `GET` method was used.
-    - 2 — `POST` method was used.
-- `http_user_agent` (String) — The `UserAgent` header passed in the HTTP request.
-- `quota_key` (String) — The “quota key” specified in the [quotas](quotas.md) setting (see `keyed`).
-- `revision` (UInt32) — ClickHouse revision.
-- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. Their description can be found in the table [system.events](#system_tables-events).
-- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` column.
-
-By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query_thread_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.
-
-When the table is deleted manually, it will be automatically created on the fly. Note that all the previous logs will be deleted.
-
-!!! note "Note"
-    The storage period for logs is unlimited. Logs aren't automatically deleted from the table. You need to organize the removal of outdated logs yourself.
-
-You can specify an arbitrary partitioning key for the `system.query_thread_log` table in the [query_thread_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) server setting (see the `partition_by` parameter).
-
-## system.trace_log {#system_tables-trace_log}
-
-Contains stack traces collected by the sampling query profiler.
-
-ClickHouse creates this table when the [trace_log](server-configuration-parameters/settings.md#server_configuration_parameters-trace_log) server configuration section is set. Also the [query_profiler_real_time_period_ns](settings/settings.md#query_profiler_real_time_period_ns) and [query_profiler_cpu_time_period_ns](settings/settings.md#query_profiler_cpu_time_period_ns) settings should be set.
-
-To analyze logs, use the `addressToLine`, `addressToSymbol` and `demangle` introspection functions.
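-
-For example, a raw `trace` can be rendered as readable symbol names roughly like this (a sketch; it assumes the `allow_introspection_functions` setting is enabled for the session):
-
-``` sql
-SELECT arrayStringConcat(arrayMap(x -> demangle(addressToSymbol(x)), trace), '\n') AS symbolized
-FROM system.trace_log
-LIMIT 1
-```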
-
-Columns:
-
-- `event_date` ([Date](../sql-reference/data-types/date.md)) — Date of the sampling moment.
-
-- `event_time` ([DateTime](../sql-reference/data-types/datetime.md)) — Timestamp of the sampling moment.
-
-- `timestamp_ns` ([UInt64](../sql-reference/data-types/int-uint.md)) — Timestamp of the sampling moment in nanoseconds.
-
-- `revision` ([UInt32](../sql-reference/data-types/int-uint.md)) — ClickHouse server build revision.
-
-    When connecting to the server by `clickhouse-client`, you see the string similar to `Connected to ClickHouse server version 19.18.1 revision 54429.`. This field contains the `revision`, but not the `version` of a server.
-
-- `timer_type` ([Enum8](../sql-reference/data-types/enum.md)) — Timer type:
-
-    - `Real` represents wall-clock time.
-    - `CPU` represents CPU time.
-
-- `thread_number` ([UInt32](../sql-reference/data-types/int-uint.md)) — Thread identifier.
-
-- `query_id` ([String](../sql-reference/data-types/string.md)) — Query identifier that can be used to get details about a query that was running from the [query_log](#system_tables-query_log) system table.
-
-- `trace` ([Array(UInt64)](../sql-reference/data-types/array.md)) — Stack trace at the moment of sampling. Each element is a virtual memory address inside the ClickHouse server process.
-
-**Example**
-
-``` sql
-SELECT * FROM system.trace_log LIMIT 1 \G
-```
-
-``` text
-Row 1:
-──────
-event_date:    2019-11-15
-event_time:    2019-11-15 15:09:38
-revision:      54428
-timer_type:    Real
-thread_number: 48
-query_id:      acc4d61f-5bd1-4a3e-bc91-2180be37c915
-trace:         [94222141367858,94222152240175,94222152325351,94222152329944,94222152330796,94222151449980,94222144088167,94222151682763,94222144088167,94222151682763,94222144088167,94222144058283,94222144059248,94222091840750,94222091842302,94222091831228,94222189631488,140509950166747,140509942945935]
-```
-
-## system.replicas {#system_tables-replicas}
-
-Contains information and status for replicated tables residing on the local server.
-This table can be used for monitoring. The table contains a row for every Replicated\* table.
-
-Example:
-
-``` sql
-SELECT *
-FROM system.replicas
-WHERE table = 'visits'
-FORMAT Vertical
-```
-
-``` text
-Row 1:
-──────
-database:                   merge
-table:                      visits
-engine:                     ReplicatedCollapsingMergeTree
-is_leader:                  1
-can_become_leader:          1
-is_readonly:                0
-is_session_expired:         0
-future_parts:               1
-parts_to_check:             0
-zookeeper_path:             /clickhouse/tables/01-06/visits
-replica_name:               example01-06-1.yandex.ru
-replica_path:               /clickhouse/tables/01-06/visits/replicas/example01-06-1.yandex.ru
-columns_version:            9
-queue_size:                 1
-inserts_in_queue:           0
-merges_in_queue:            1
-part_mutations_in_queue:    0
-queue_oldest_time:          2020-02-20 08:34:30
-inserts_oldest_time:        1970-01-01 00:00:00
-merges_oldest_time:         2020-02-20 08:34:30
-part_mutations_oldest_time: 1970-01-01 00:00:00
-oldest_part_to_get:
-oldest_part_to_merge_to:    20200220_20284_20840_7
-oldest_part_to_mutate_to:
-log_max_index:              596273
-log_pointer:                596274
-last_queue_update:          2020-02-20 08:34:32
-absolute_delay:             0
-total_replicas:             2
-active_replicas:            2
-```
-
-Columns:
-
-- `database` (`String`) - Database name
-- `table` (`String`) - Table name
-- `engine` (`String`) - Table engine name
-- `is_leader` (`UInt8`) - Whether the replica is the leader.
-    Only one replica at a time can be the leader. The leader is responsible for selecting background merges to perform.
-    Note that writes can be performed to any replica that is available and has a session in ZK, regardless of whether it is a leader.
-- `can_become_leader` (`UInt8`) - Whether the replica can be elected as a leader.
-- `is_readonly` (`UInt8`) - Whether the replica is in read-only mode.
-    This mode is turned on if the config doesn't have sections with ZooKeeper, if an unknown error occurred when reinitializing sessions in ZooKeeper, and during session reinitialization in ZooKeeper.
-- `is_session_expired` (`UInt8`) - Whether the session with ZooKeeper has expired. Basically the same as `is_readonly`.
-- `future_parts` (`UInt32`) - The number of data parts that will appear as the result of INSERTs or merges that haven't been done yet.
-- `parts_to_check` (`UInt32`) - The number of data parts in the queue for verification. A part is put in the verification queue if there is suspicion that it might be damaged.
-- `zookeeper_path` (`String`) - Path to the table data in ZooKeeper.
-- `replica_name` (`String`) - Replica name in ZooKeeper. Different replicas of the same table have different names.
-- `replica_path` (`String`) - Path to the replica data in ZooKeeper. The same as concatenating ‘zookeeper_path/replicas/replica_path’.
-- `columns_version` (`Int32`) - Version number of the table structure. Indicates how many times ALTER was performed. If replicas have different versions, it means some replicas haven't made all of the ALTERs yet.
-- `queue_size` (`UInt32`) - Size of the queue for operations waiting to be performed. Operations include inserting blocks of data, merges, and certain other actions. It usually coincides with `future_parts`.
-- `inserts_in_queue` (`UInt32`) - Number of inserts of blocks of data that need to be made. Insertions are usually replicated fairly quickly. If this number is large, it means something is wrong.
-- `merges_in_queue` (`UInt32`) - The number of merges waiting to be done. Sometimes merges are lengthy, so this value may be greater than zero for a long time.
-- `part_mutations_in_queue` (`UInt32`) - The number of mutations waiting to be done.
-- `queue_oldest_time` (`DateTime`) - If `queue_size` is greater than 0, shows when the oldest operation was added to the queue.
-- `inserts_oldest_time` (`DateTime`) - See `queue_oldest_time`
-- `merges_oldest_time` (`DateTime`) - See `queue_oldest_time`
-- `part_mutations_oldest_time` (`DateTime`) - See `queue_oldest_time`
-
-The next 4 columns have a non-zero value only where there is an active session with ZK.
-
-- `log_max_index` (`UInt64`) - Maximum entry number in the log of general activity.
-- `log_pointer` (`UInt64`) - Maximum entry number in the log of general activity that the replica copied to its execution queue, plus one. If `log_pointer` is much smaller than `log_max_index`, something is wrong.
-- `last_queue_update` (`DateTime`) - When the queue was updated last time.
-- `absolute_delay` (`UInt64`) - How big lag in seconds the current replica has.
-- `total_replicas` (`UInt8`) - The total number of known replicas of this table.
-- `active_replicas` (`UInt8`) - The number of replicas of this table that have a session in ZooKeeper (i.e., the number of functioning replicas).
-
-If you request all the columns, the table may work a bit slowly, since several reads from ZooKeeper are made for each row.
-If you don't request the last 4 columns (log_max_index, log_pointer, total_replicas, active_replicas), the table works quickly.
-
-For example, you can check that everything is working correctly like this:
-
-``` sql
-SELECT
-    database,
-    table,
-    is_leader,
-    is_readonly,
-    is_session_expired,
-    future_parts,
-    parts_to_check,
-    columns_version,
-    queue_size,
-    inserts_in_queue,
-    merges_in_queue,
-    log_max_index,
-    log_pointer,
-    total_replicas,
-    active_replicas
-FROM system.replicas
-WHERE
-       is_readonly
-    OR is_session_expired
-    OR future_parts > 20
-    OR parts_to_check > 10
-    OR queue_size > 20
-    OR inserts_in_queue > 10
-    OR log_max_index - log_pointer > 10
-    OR total_replicas < 2
-    OR active_replicas < total_replicas
-```
-
-If this query doesn't return anything, it means that everything is fine.
-
-## system.settings {#system-tables-system-settings}
-
-Contains information about session settings for the current user.
-
-Columns:
-
-- `name` ([String](../sql-reference/data-types/string.md)) — Setting name.
-- `value` ([String](../sql-reference/data-types/string.md)) — Setting value.
-- `changed` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether a setting is changed from its default value.
-- `description` ([String](../sql-reference/data-types/string.md)) — Short setting description.
-- `min` ([Nullable](../sql-reference/data-types/nullable.md)([String](../sql-reference/data-types/string.md))) — Minimum value of the setting, if any is set via [constraints](settings/constraints-on-settings.md#constraints-on-settings). If the setting has no minimum value, contains [NULL](../sql-reference/syntax.md#null-literal).
-- `max` ([Nullable](../sql-reference/data-types/nullable.md)([String](../sql-reference/data-types/string.md))) — Maximum value of the setting, if any is set via [constraints](settings/constraints-on-settings.md#constraints-on-settings). If the setting has no maximum value, contains [NULL](../sql-reference/syntax.md#null-literal).
-- `readonly` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether the current user can change the setting:
-    - `0` — Current user can change the setting.
-    - `1` — Current user can't change the setting.
-
-**Example**
-
-The following example shows how to get information about settings whose name contains `min_i`.
-
-``` sql
-SELECT *
-FROM system.settings
-WHERE name LIKE '%min_i%'
-```
-
-``` text
-┌─name────────────────────────────────────────┬─value─────┬─changed─┬─description───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─min──┬─max──┬─readonly─┐
-│ min_insert_block_size_rows                  │ 1048576   │       0 │ Squash blocks passed to INSERT query to specified size in rows, if blocks are not big enough.                                                                          │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │
-│ min_insert_block_size_bytes                 │ 268435456 │       0 │ Squash blocks passed to INSERT query to specified size in bytes, if blocks are not big enough.                                                                         │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │
-│ read_backoff_min_interval_between_events_ms │ 1000      │       0 │ Settings to reduce the number of threads in case of slow reads. Do not pay attention to the event, if the previous one has passed less than a certain amount of time.  │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │
-└─────────────────────────────────────────────┴───────────┴─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────┴──────┴──────────┘
-```
-
-Using `WHERE changed` can be useful, for example, when you want to check:
-
-- Whether settings in configuration files are loaded correctly and are in use.
-- Settings that changed in the current session.
-
-``` sql
-SELECT * FROM system.settings WHERE changed AND name='load_balancing'
-```
-
-A query over the changed settings returns output of the following form:
-
-``` text
-┌─name───────────────────┬─value───────┐
-│ max_threads            │ 8           │
-│ use_uncompressed_cache │ 0           │
-│ load_balancing         │ random      │
-│ max_memory_usage       │ 10000000000 │
-└────────────────────────┴─────────────┘
-```
-
-**See also**
-
-- [Settings](settings/index.md#session-settings-intro)
-- [Permissions for queries](settings/permissions-for-queries.md#settings_readonly)
-- [Constraints on settings](settings/constraints-on-settings.md)
-
-## system.merge_tree_settings {#system-merge_tree_settings}
-
-Contains information about settings for `MergeTree` tables.
-
-Columns:
-
-- `name` (String) — Setting name.
-- `value` (String) — Setting value.
-- `description` (String) — Setting description.
-- `type` (String) — Setting type (implementation specific string value).
-- `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed.
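-
-For instance, a quick look at a few merge-related settings could be done along these lines (an illustrative query; the name filter is arbitrary):
-
-``` sql
-SELECT name, value, changed
-FROM system.merge_tree_settings
-WHERE name LIKE '%merge%'
-LIMIT 5
-```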
-
-## system.table_engines {#system-table-engines}
-
-Contains descriptions of table engines supported by the server and their feature support information.
-
-This table contains the following columns (the column type is shown in brackets):
-
-- `name` (String) — The name of the table engine.
-- `supports_settings` (UInt8) — Flag that indicates if the table engine supports the `SETTINGS` clause.
-- `supports_skipping_indices` (UInt8) — Flag that indicates if the table engine supports [data skipping indexes](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-data_skipping-indexes).
-- `supports_ttl` (UInt8) — Flag that indicates if the table engine supports [TTL](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
-- `supports_sort_order` (UInt8) — Flag that indicates if the table engine supports the clauses `PARTITION_BY`, `PRIMARY_KEY`, `ORDER_BY` and `SAMPLE_BY`.
-- `supports_replication` (UInt8) — Flag that indicates if the table engine supports [data replication](../engines/table-engines/mergetree-family/replication.md).
-- `supports_deduplication` (UInt8) — Flag that indicates if the table engine supports data deduplication.
-
-Example:
-
-``` sql
-SELECT *
-FROM system.table_engines
-WHERE name in ('Kafka', 'MergeTree', 'ReplicatedCollapsingMergeTree')
-```
-
-``` text
-┌─name──────────────────────────┬─supports_settings─┬─supports_skipping_indices─┬─supports_sort_order─┬─supports_ttl─┬─supports_replication─┬─supports_deduplication─┐
-│ Kafka                         │                 1 │                         0 │                   0 │            0 │                    0 │                      0 │
-│ MergeTree                     │                 1 │                         1 │                   1 │            1 │                    0 │                      0 │
-│ ReplicatedCollapsingMergeTree │                 1 │                         1 │                   1 │            1 │                    1 │                      1 │
-└───────────────────────────────┴───────────────────┴───────────────────────────┴─────────────────────┴──────────────┴──────────────────────┴────────────────────────┘
-```
-
-**See also**
-
-- MergeTree family [query clauses](../engines/table-engines/mergetree-family/mergetree.md#mergetree-query-clauses)
-- Kafka [settings](../engines/table-engines/integrations/kafka.md#table_engine-kafka-creating-a-table)
-- Join [settings](../engines/table-engines/special/join.md#join-limitations-and-settings)
-
-## system.tables {#system-tables}
-
-Contains metadata of each table that the server knows about. Detached tables are not shown in `system.tables`.
-
-This table contains the following columns (the column type is shown in brackets):
-
-- `database` (String) — The name of the database the table is in.
-
-- `name` (String) — Table name.
-
-- `engine` (String) — Table engine name (without parameters).
-
-- `is_temporary` (UInt8) - Flag that indicates whether the table is temporary.
-
-- `data_path` (String) - Path to the table data in the file system.
-
-- `metadata_path` (String) - Path to the table metadata in the file system.
-
-- `metadata_modification_time` (DateTime) - Time of latest modification of the table metadata.
-
-- `dependencies_database` (Array(String)) - Database dependencies.
-
-- `dependencies_table` (Array(String)) - Table dependencies ([MaterializedView](../engines/table-engines/special/materializedview.md) tables based on the current table).
-
-- `create_table_query` (String) - The query that was used to create the table.
-
-- `engine_full` (String) - Parameters of the table engine.
-
-- `partition_key` (String) - The partition key expression specified in the table.
-
-- `sorting_key` (String) - The sorting key expression specified in the table.
-
-- `primary_key` (String) - The primary key expression specified in the table.
-
-- `sampling_key` (String) - The sampling key expression specified in the table.
-
-- `storage_policy` (String) - The storage policy:
-
-    - [MergeTree](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes)
-    - [Distributed](../engines/table-engines/special/distributed.md#distributed)
-
-- `total_rows` (Nullable(UInt64)) - Total number of rows, if it is possible to quickly determine the exact number of rows in the table, otherwise `Null` (including the underlying `Buffer` table).
-
-- `total_bytes` (Nullable(UInt64)) - Total number of bytes, if it is possible to quickly determine the exact number of bytes for the table in storage, otherwise `Null` (**does not** include any underlying storage).
-
-    - If the table stores data on disk, returns used space on disk (i.e. compressed).
-    - If the table stores data in memory, returns the approximate number of bytes used in memory.
-
-The `system.tables` table is used in the `SHOW TABLES` query implementation.
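-
-For instance, a sketch of finding the largest tables by stored bytes using the columns above (an illustrative query; `total_bytes` may be `Null` for some engines, as noted):
-
-``` sql
-SELECT database, name, engine, total_rows, total_bytes
-FROM system.tables
-ORDER BY total_bytes DESC
-LIMIT 5
-```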
-
-## system.zookeeper {#system-zookeeper}
-
-The table does not exist if ZooKeeper is not configured. Allows reading data from the ZooKeeper cluster defined in the config.
-The query must have a ‘path’ equality condition in the WHERE clause. This is the path in ZooKeeper for the children that you want to get data for.
-
-The query `SELECT * FROM system.zookeeper WHERE path = '/clickhouse'` outputs data for all children on the `/clickhouse` node.
-To output data for all root nodes, write path = ‘/’.
-If the path specified in ‘path’ doesn't exist, an exception will be thrown.
-
-Columns:
-
-- `name` (String) — The name of the node.
-- `path` (String) — The path to the node.
-- `value` (String) — Node value.
-- `dataLength` (Int32) — Size of the value.
-- `numChildren` (Int32) — Number of descendants.
-- `czxid` (Int64) — ID of the transaction that created the node.
-- `mzxid` (Int64) — ID of the transaction that last changed the node.
-- `pzxid` (Int64) — ID of the transaction that last deleted or added descendants.
-- `ctime` (DateTime) — Time of node creation.
-- `mtime` (DateTime) — Time of the last modification of the node.
-- `version` (Int32) — Node version: the number of times the node was changed.
-- `cversion` (Int32) — Number of added or removed descendants.
-- `aversion` (Int32) — Number of changes to the ACL.
-- `ephemeralOwner` (Int64) — For ephemeral nodes, the ID of the session that owns this node.
-
-Example:
-
-``` sql
-SELECT *
-FROM system.zookeeper
-WHERE path = '/clickhouse/tables/01-08/visits/replicas'
-FORMAT Vertical
-```
-
-``` text
-Row 1:
-──────
-name:           example01-08-1.yandex.ru
-value:
-czxid:          932998691229
-mzxid:          932998691229
-ctime:          2015-03-27 16:49:51
-mtime:          2015-03-27 16:49:51
-version:        0
-cversion:       47
-aversion:       0
-ephemeralOwner: 0
-dataLength:     0
-numChildren:    7
-pzxid:          987021031383
-path:           /clickhouse/tables/01-08/visits/replicas
-
-Row 2:
-──────
-name:           example01-08-2.yandex.ru
-value:
-czxid:          933002738135
-mzxid:          933002738135
-ctime:          2015-03-27 16:57:01
-mtime:          2015-03-27 16:57:01
-version:        0
-cversion:       37
-aversion:       0
-ephemeralOwner: 0
-dataLength:     0
-numChildren:    7
-pzxid:          987021252247
-path:           /clickhouse/tables/01-08/visits/replicas
-```
-
-## system.mutations {#system_tables-mutations}
-
-The table contains information about [mutations](../sql-reference/statements/alter.md#alter-mutations) of MergeTree tables and their progress. Each mutation command is represented by a single row. The table has the following columns:
-
-**database**, **table** - The name of the database and table to which the mutation was applied.
-
-**mutation_id** - The ID of the mutation. For replicated tables these IDs correspond to znode names in the `<table_path_in_zookeeper>/mutations/` directory in ZooKeeper. For unreplicated tables the IDs correspond to file names in the data directory of the table.
-
-**command** - The mutation command string (the part of the query after `ALTER TABLE [db.]table`).
-
-**create_time** - When this mutation command was submitted for execution.
-
-**block_numbers.partition_id**, **block_numbers.number** - A nested column. For mutations of replicated tables, it contains one record for each partition: the partition ID and the block number that was acquired by the mutation (in each partition, only parts that contain blocks with numbers less than the block number acquired by the mutation in that partition will be mutated). In non-replicated tables, block numbers in all partitions form a single sequence. This means that for mutations of non-replicated tables, the column will contain one record with a single block number acquired by the mutation.
-
-**parts_to_do** - The number of data parts that need to be mutated for the mutation to finish.
-
-**is_done** - Is the mutation done? Note that even if `parts_to_do = 0` it is possible that a mutation of a replicated table is not done yet because of a long-running INSERT that will create a new data part that needs to be mutated.
-
-If there were problems with mutating some parts, the following columns contain additional information:
-
-**latest_failed_part** - The name of the most recent part that could not be mutated.
-
-**latest_fail_time** - The time of the most recent part mutation failure.
-
-**latest_fail_reason** - The exception message that caused the most recent part mutation failure.
-
-## system.disks {#system_tables-disks}
-
-Contains information about disks defined in the [server configuration](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes_configure).
-
-Columns:
-
-- `name` ([String](../sql-reference/data-types/string.md)) — Name of a disk in the server configuration.
-- `path` ([String](../sql-reference/data-types/string.md)) — Path to the mount point in the file system.
-- `free_space` ([UInt64](../sql-reference/data-types/int-uint.md)) — Free space on disk in bytes.
-- `total_space` ([UInt64](../sql-reference/data-types/int-uint.md)) — Disk volume in bytes.
-- `keep_free_space` ([UInt64](../sql-reference/data-types/int-uint.md)) — Amount of disk space that should stay free on disk in bytes. Defined in the `keep_free_space_bytes` parameter of disk configuration.
-
-## system.storage_policies {#system_tables-storage_policies}
-
-Contains information about storage policies and volumes defined in the [server configuration](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes_configure).
-
-Columns:
-
-- `policy_name` ([String](../sql-reference/data-types/string.md)) — Name of the storage policy.
-- `volume_name` ([String](../sql-reference/data-types/string.md)) — Volume name defined in the storage policy.
-- `volume_priority` ([UInt64](../sql-reference/data-types/int-uint.md)) — Volume order number in the configuration.
-- `disks` ([Array(String)](../sql-reference/data-types/array.md)) — Disk names, defined in the storage policy.
-- `max_data_part_size` ([UInt64](../sql-reference/data-types/int-uint.md)) — Maximum size of a data part that can be stored on volume disks (0 — no limit).
-- `move_factor` ([Float64](../sql-reference/data-types/float.md)) — Ratio of free disk space. When the ratio exceeds the value of the configuration parameter, ClickHouse starts to move data to the next volume in order.
-
-If the storage policy contains more than one volume, then information for each volume is stored in an individual row of the table.
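-
-For instance, free and total space per disk can be inspected roughly like this (an illustrative query; `formatReadableSize` is used only for readability):
-
-``` sql
-SELECT name, path, formatReadableSize(free_space) AS free, formatReadableSize(total_space) AS total
-FROM system.disks
-```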
-
-[Original article](https://clickhouse.tech/docs/en/operations/system_tables/)
diff --git a/docs/es/operations/tips.md b/docs/es/operations/tips.md
deleted file mode 100644
index deb226450aa..00000000000
--- a/docs/es/operations/tips.md
+++ /dev/null
@@ -1,251 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 58
-toc_title: Usage recommendations
----
-
-# Usage Recommendations {#usage-recommendations}
-
-## CPU Scaling Governor {#cpu-scaling-governor}
-
-Always use the `performance` scaling governor. The `on-demand` scaling governor works much worse with constantly high demand.
-
-``` bash
-$ echo 'performance' | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
-```
-
-## CPU Limitations {#cpu-limitations}
-
-Processors can overheat. Use `dmesg` to see if the CPU's clock rate was limited due to overheating.
-The restriction can also be set externally at the datacenter level. You can use `turbostat` to monitor it under a load.
-
-## RAM {#ram}
-
-For small amounts of data (up to ~200 GB compressed), it is best to use as much memory as the volume of data.
-For large amounts of data and when processing interactive (online) queries, you should use a reasonable amount of RAM (128 GB or more) so the hot data subset will fit in the cache of pages.
-Even for data volumes of ~50 TB per server, using 128 GB of RAM significantly improves query performance compared to 64 GB.
-
-Do not disable overcommit. The value `cat /proc/sys/vm/overcommit_memory` should be 0 or 1. Run
-
-``` bash
-$ echo 0 | sudo tee /proc/sys/vm/overcommit_memory
-```
-
-## Huge Pages {#huge-pages}
-
-Always disable transparent huge pages. It interferes with memory allocators, which leads to significant performance degradation.
-
-``` bash
-$ echo 'never' | sudo tee /sys/kernel/mm/transparent_hugepage/enabled
-```
-
-Use `perf top` to watch the time spent in the kernel for memory management.
-Permanent huge pages also do not need to be allocated.
-
-## Storage Subsystem {#storage-subsystem}
-
-If your budget allows you to use SSD, use SSD.
-If not, use HDD. SATA HDDs 7200 RPM will do.
-
-Give preference to a lot of servers with local hard drives over a smaller number of servers with attached disk shelves.
-But for storing archives with rare queries, shelves will work.
-
-## RAID {#raid}
-
-When using HDD, you can combine their RAID-10, RAID-5, RAID-6 or RAID-50.
-For Linux, software RAID is better (with `mdadm`). We don't recommend using LVM.
-When creating RAID-10, select the `far` layout.
-If your budget allows, choose RAID-10.
-
-If you have more than 4 disks, use RAID-6 (preferred) or RAID-50, instead of RAID-5.
-When using RAID-5, RAID-6 or RAID-50, always increase stripe_cache_size, since the default value is usually not the best choice.
-
-``` bash
-$ echo 4096 | sudo tee /sys/block/md2/md/stripe_cache_size
-```
-
-Calculate the exact number from the number of devices and the block size, using the formula: `2 * num_devices * chunk_size_in_bytes / 4096`.
-
-A block size of 1024 KB is sufficient for all RAID configurations.
-Never set the block size too small or too large.
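-
-As a worked example of the formula (assuming, purely for illustration, an array of 8 devices with a 1024 KB chunk size on the `md2` device used above):
-
-``` bash
-# 2 * num_devices * chunk_size_in_bytes / 4096 = 2 * 8 * 1048576 / 4096 = 4096
-$ echo $((2 * 8 * 1048576 / 4096)) | sudo tee /sys/block/md2/md/stripe_cache_size
-```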
-
-You can use RAID-0 on SSD.
-Regardless of RAID use, always use replication for data security.
-
-Enable NCQ with a long queue. For HDD, choose the CFQ scheduler, and for SSD, choose noop. Don't reduce the ‘readahead’ setting.
-For HDD, enable the write cache.
-
-## File System {#file-system}
-
-Ext4 is the most reliable option. Set the mount options `noatime, nobarrier`.
-XFS is also suitable, but it hasn't been as thoroughly tested with ClickHouse.
-Most other file systems should also work fine. File systems with delayed allocation work better.
-
-## Linux Kernel {#linux-kernel}
-
-Don't use an outdated Linux kernel.
-
-## Network {#network}
-
-If you are using IPv6, increase the size of the route cache.
-The Linux kernel prior to 3.2 had a multitude of problems with the IPv6 implementation.
-
-Use at least a 10 GB network, if possible. 1 Gb will also work, but it will be much worse for patching replicas with tens of terabytes of data, or for processing distributed queries with a large amount of intermediate data.
-
-## ZooKeeper {#zookeeper}
-
-You are probably already using ZooKeeper for other purposes. You can use the same installation of ZooKeeper, if it isn't already overloaded.
-
-It's best to use a fresh version of ZooKeeper – 3.4.9 or later. The version in stable Linux distributions may be outdated.
-
-You should never use manually written scripts to transfer data between different ZooKeeper clusters, because the result will be incorrect for sequential nodes. Never use the “zkcopy” utility for the same reason: https://github.com/ksprojects/zkcopy/issues/15
-
-If you want to divide an existing ZooKeeper cluster into two, the correct way is to increase the number of its replicas and then reconfigure it as two independent clusters.
-
-Do not run ZooKeeper on the same servers as ClickHouse, because ZooKeeper is very sensitive to latency and ClickHouse may utilize all available system resources.
-
-With the default settings, ZooKeeper is a time bomb:
-
-> The ZooKeeper server won't delete files from old snapshots and logs when using the default configuration (see autopurge), and this is the responsibility of the operator.
-
-This bomb must be defused.
-
-The ZooKeeper (3.5.1) configuration below is used in the Yandex.Metrica production environment as of May 20, 2017:
-
-zoo.cfg:
-
-``` bash
-# http://hadoop.apache.org/zookeeper/docs/current/zookeeperAdmin.html
-
-# The number of milliseconds of each tick
-tickTime=2000
-# The number of ticks that the initial
-# synchronization phase can take
-initLimit=30000
-# The number of ticks that can pass between
-# sending a request and getting an acknowledgement
-syncLimit=10
-
-maxClientCnxns=2000
-
-maxSessionTimeout=60000000
-# the directory where the snapshot is stored.
-dataDir=/opt/zookeeper/{{ '{{' }} cluster['name'] {{ '}}' }}/data
-# Place the dataLogDir to a separate physical disc for better performance
-dataLogDir=/opt/zookeeper/{{ '{{' }} cluster['name'] {{ '}}' }}/logs
-
-autopurge.snapRetainCount=10
-autopurge.purgeInterval=1
-
-
-# To avoid seeks ZooKeeper allocates space in the transaction log file in
-# blocks of preAllocSize kilobytes. The default block size is 64M. One reason
-# for changing the size of the blocks is to reduce the block size if snapshots
-# are taken more often. (Also, see snapCount).
-preAllocSize=131072
-
-# Clients can submit requests faster than ZooKeeper can process them,
-# especially if there are a lot of clients. To prevent ZooKeeper from running
-# out of memory due to queued requests, ZooKeeper will throttle clients so that
-# there is no more than globalOutstandingLimit outstanding requests in the
-# system. The default limit is 1,000. ZooKeeper logs transactions to a
-# transaction log. After snapCount transactions are written to a log file a
-# snapshot is started and a new transaction log file is started. The default
-# snapCount is 10,000.
-snapCount=3000000
-
-# If this option is defined, requests will be logged to a trace file named
-# traceFile.year.month.day.
-#traceFile=
-
-# Leader accepts client connections. Default value is "yes". The leader machine
-# coordinates updates. For higher update throughput at the slight expense of
-# read throughput the leader can be configured to not accept clients and focus
-# on coordination.
-leaderServes=yes
-
-standaloneEnabled=false
-dynamicConfigFile=/etc/zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }}/conf/zoo.cfg.dynamic
-```
-
-Java version:
-
-``` text
-Java(TM) SE Runtime Environment (build 1.8.0_25-b17)
-Java HotSpot(TM) 64-Bit Server VM (build 25.25-b02, mixed mode)
-```
-
-JVM parameters:
-
-``` bash
-NAME=zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }}
-ZOOCFGDIR=/etc/$NAME/conf
-
-# TODO this is really ugly
-# How to find out, which jars are needed?
-# seems, that log4j requires the log4j.properties file to be in the classpath
-CLASSPATH="$ZOOCFGDIR:/usr/build/classes:/usr/build/lib/*.jar:/usr/share/zookeeper/zookeeper-3.5.1-metrika.jar:/usr/share/zookeeper/slf4j-log4j12-1.7.5.jar:/usr/share/zookeeper/slf4j-api-1.7.5.jar:/usr/share/zookeeper/servlet-api-2.5-20081211.jar:/usr/share/zookeeper/netty-3.7.0.Final.jar:/usr/share/zookeeper/log4j-1.2.16.jar:/usr/share/zookeeper/jline-2.11.jar:/usr/share/zookeeper/jetty-util-6.1.26.jar:/usr/share/zookeeper/jetty-6.1.26.jar:/usr/share/zookeeper/javacc.jar:/usr/share/zookeeper/jackson-mapper-asl-1.9.11.jar:/usr/share/zookeeper/jackson-core-asl-1.9.11.jar:/usr/share/zookeeper/commons-cli-1.2.jar:/usr/src/java/lib/*.jar:/usr/etc/zookeeper"
-
-ZOOCFG="$ZOOCFGDIR/zoo.cfg"
-ZOO_LOG_DIR=/var/log/$NAME
-USER=zookeeper
-GROUP=zookeeper
-PIDDIR=/var/run/$NAME
-PIDFILE=$PIDDIR/$NAME.pid
-SCRIPTNAME=/etc/init.d/$NAME
-JAVA=/usr/bin/java
-ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
-ZOO_LOG4J_PROP="INFO,ROLLINGFILE"
-JMXLOCALONLY=false
-JAVA_OPTS="-Xms{{ '{{' }} cluster.get('xms','128M') {{ '}}' }} \
-    -Xmx{{ '{{' }} cluster.get('xmx','1G') {{ '}}' }} \
-    -Xloggc:/var/log/$NAME/zookeeper-gc.log \
-    -XX:+UseGCLogFileRotation \
-    -XX:NumberOfGCLogFiles=16 \
-    -XX:GCLogFileSize=16M \
-    -verbose:gc \
-    -XX:+PrintGCTimeStamps \
-    -XX:+PrintGCDateStamps \
-    -XX:+PrintGCDetails \
-    -XX:+PrintTenuringDistribution \
-    -XX:+PrintGCApplicationStoppedTime \
-    -XX:+PrintGCApplicationConcurrentTime \
-    -XX:+PrintSafepointStatistics \
-    -XX:+UseParNewGC \
-    -XX:+UseConcMarkSweepGC \
-    -XX:+CMSParallelRemarkEnabled"
-```
-
-Salt init:
-
-``` text
-description "zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }} centralized coordination service"
-
-start on runlevel [2345]
-stop on runlevel [!2345]
-
-respawn
-
-limit nofile 8192 8192
-
-pre-start script
-    [ -r "/etc/zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }}/conf/environment" ] || exit 0
-    . /etc/zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }}/conf/environment
-    [ -d $ZOO_LOG_DIR ] || mkdir -p $ZOO_LOG_DIR
-    chown $USER:$GROUP $ZOO_LOG_DIR
-end script
-
-script
-    . /etc/zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }}/conf/environment
-    [ -r /etc/default/zookeeper ] && . /etc/default/zookeeper
-    if [ -z "$JMXDISABLE" ]; then
-        JAVA_OPTS="$JAVA_OPTS -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY"
-    fi
-    exec start-stop-daemon --start -c $USER --exec $JAVA --name zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }} \
-        -- -cp $CLASSPATH $JAVA_OPTS -Dzookeeper.log.dir=${ZOO_LOG_DIR} \
-        -Dzookeeper.root.logger=${ZOO_LOG4J_PROP} $ZOOMAIN $ZOOCFG
-end script
-```
-
-{## [Original article](https://clickhouse.tech/docs/en/operations/tips/) ##}
diff --git a/docs/es/operations/troubleshooting.md b/docs/es/operations/troubleshooting.md
deleted file mode 100644
index 9e8d2caca59..00000000000
--- a/docs/es/operations/troubleshooting.md
+++ /dev/null
@@ -1,146 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 46
-toc_title: Troubleshooting
----
-
-# Troubleshooting {#troubleshooting}
-
-- [Installation](#troubleshooting-installation-errors)
-- [Connecting to the server](#troubleshooting-accepts-no-connections)
-- [Query processing](#troubleshooting-does-not-process-queries)
-- [Efficiency of query processing](#troubleshooting-too-slow)
-
-## Installation {#troubleshooting-installation-errors}
-
-### You Cannot Get Deb Packages from the ClickHouse Repository with Apt-get {#you-cannot-get-deb-packages-from-clickhouse-repository-with-apt-get}
-
-- Check the firewall settings.
-- If you cannot access the repository for any reason, download the packages as described in the [Getting started](../getting-started/index.md) article and install them manually using the `sudo dpkg -i <packages>` command. You will also need the `tzdata` package.
-
-## Connecting to the Server {#troubleshooting-accepts-no-connections}
-
-Possible issues:
-
-- The server is not running.
-- Unexpected or wrong configuration parameters.
-
-### Server Is Not Running {#server-is-not-running}
-
-**Check if the server is running**
-
-Command:
-
-``` bash
-$ sudo service clickhouse-server status
-```
-
-If the server is not running, start it with the command:
-
-``` bash
-$ sudo service clickhouse-server start
-```
-
-**Check logs**
-
-The main log of `clickhouse-server` is in `/var/log/clickhouse-server/clickhouse-server.log` by default.
-
-If the server started successfully, you should see the strings:
-
-- `<Information> Application: starting up.` — Server started.
-- `<Information> Application: Ready for connections.` — Server is running and ready for connections.
-
-If `clickhouse-server` failed to start with a configuration error, you should see the `<Error>` string with an error description. For example:
-
-``` text
-2019.01.11 15:23:25.549505 [ 45 ] {} <Error> ExternalDictionaries: Failed reloading 'event2id' external dictionary: Poco::Exception. Code: 1000, e.code() = 111, e.displayText() = Connection refused, e.what() = Connection refused
-```
-
-If you don't see an error at the end of the file, look through the entire file starting from the string:
-
-``` text
-<Information> Application: starting up.
-```
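-
-For example, one way to jump to the most recent startup in a large log file is a shell one-liner along these lines (an illustrative helper, not part of the original text; adjust the log path if you changed it in the config):
-
-``` bash
-$ grep -n 'Application: starting up' /var/log/clickhouse-server/clickhouse-server.log | tail -n 1
-```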
-
-If you try to start a second instance of `clickhouse-server` on the server, you see the following log:
-
-``` text
-2019.01.11 15:25:11.151730 [ 1 ] {} <Information> : Starting ClickHouse 19.1.0 with revision 54413
-2019.01.11 15:25:11.154578 [ 1 ] {} <Information> Application: starting up
-2019.01.11 15:25:11.156361 [ 1 ] {} <Information> StatusFile: Status file ./status already exists - unclean restart. Contents:
-PID: 8510
-Started at: 2019-01-11 15:24:23
-Revision: 54413
-
-2019.01.11 15:25:11.156673 [ 1 ] {} <Error> Application: DB::Exception: Cannot lock file ./status. Another server instance in same directory is already running.
-2019.01.11 15:25:11.156682 [ 1 ] {} <Information> Application: shutting down
-2019.01.11 15:25:11.156686 [ 1 ] {} <Debug> Application: Uninitializing subsystem: Logging Subsystem
-2019.01.11 15:25:11.156716 [ 2 ] {} <Information> BaseDaemon: Stop SignalListener thread
-```
-
-**See system.d logs**
-
-If you don't find any useful information in `clickhouse-server` logs or there aren't any logs, you can view `system.d` logs using the command:
-
-``` bash
-$ sudo journalctl -u clickhouse-server
-```
-
-**Start clickhouse-server in interactive mode**
-
-``` bash
-$ sudo -u clickhouse /usr/bin/clickhouse-server --config-file /etc/clickhouse-server/config.xml
-```
-
-This command starts the server as an interactive app with standard parameters of the autostart script. In this mode `clickhouse-server` prints all the event messages in the console.
-
-### Configuration Parameters {#configuration-parameters}
-
-Check:
-
-- Docker settings.
-
-    If you run ClickHouse in Docker in an IPv6 network, make sure that `network=host` is set.
-
-- Endpoint settings.
-
-    Check the [listen_host](server-configuration-parameters/settings.md#server_configuration_parameters-listen_host) and [tcp_port](server-configuration-parameters/settings.md#server_configuration_parameters-tcp_port) settings.
-
-    The ClickHouse server accepts localhost connections only by default.
-
-- HTTP protocol settings.
-
-    Check protocol settings for the HTTP API.
-
-- Secure connection settings.
-
-    Check:
-
-    - The [tcp_port_secure](server-configuration-parameters/settings.md#server_configuration_parameters-tcp_port_secure) setting.
-    - Settings for [SSL certificates](server-configuration-parameters/settings.md#server_configuration_parameters-openssl).
-
-    Use proper parameters while connecting. For example, use the `port_secure` parameter with `clickhouse_client`.
-
-- User settings.
-
-    You might be using the wrong user name or password.
-
-## Query Processing {#troubleshooting-does-not-process-queries}
-
-If ClickHouse is not able to process the query, it sends an error description to the client. In the `clickhouse-client` you get a description of the error in the console. If you are using the HTTP interface, ClickHouse sends the error description in the response body. For example:
-
-``` bash
-$ curl 'http://localhost:8123/' --data-binary "SELECT a"
-Code: 47, e.displayText() = DB::Exception: Unknown identifier: a. Note that there are no tables (FROM clause) in your query, context: required_names: 'a' source_tables: table_aliases: private_aliases: column_aliases: public_columns: 'a' masked_columns: array_join_columns: source_columns: , e.what() = DB::Exception
-```
-
-If you start `clickhouse-client` with the `stack-trace` parameter, ClickHouse returns the server stack trace with the description of an error.
-
-You might see a message about a broken connection. In this case, you can repeat the query. If the connection breaks every time you perform the query, check the server logs for errors.
-
-## Efficiency of Query Processing {#troubleshooting-too-slow}
-
-If you see that ClickHouse is working too slowly, you need to profile the load on the server resources and network for your queries.
-
-You can use the clickhouse-benchmark utility to profile queries. It shows the number of queries processed per second, the number of rows processed per second, and percentiles of query processing times.
diff --git a/docs/es/operations/update.md b/docs/es/operations/update.md
deleted file mode 100644
index 11d15381d72..00000000000
--- a/docs/es/operations/update.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 47
-toc_title: ClickHouse Update
----
-
-# ClickHouse Update {#clickhouse-update}
-
-If ClickHouse was installed from deb packages, execute the following commands on the server:
-
-``` bash
-$ sudo apt-get update
-$ sudo apt-get install clickhouse-client clickhouse-server
-$ sudo service clickhouse-server restart
-```
-
-If you installed ClickHouse using something other than the recommended deb packages, use the appropriate update method.
-
-ClickHouse does not support a distributed update. The operation should be performed consecutively on each separate server. Do not update all the servers on a cluster simultaneously, or the cluster will be unavailable for some time.
diff --git a/docs/es/operations/utilities/clickhouse-benchmark.md b/docs/es/operations/utilities/clickhouse-benchmark.md
deleted file mode 100644
index 9bcafa40dfe..00000000000
--- a/docs/es/operations/utilities/clickhouse-benchmark.md
+++ /dev/null
@@ -1,156 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 61
-toc_title: clickhouse-benchmark
----
-
-# clickhouse-benchmark {#clickhouse-benchmark}
-
-Connects to a ClickHouse server and repeatedly sends the specified queries.
-
-Syntax:
-
-``` bash
-$ echo "single query" | clickhouse-benchmark [keys]
-```
-
-or
-
-``` bash
-$ clickhouse-benchmark [keys] <<< "single query"
-```
-
-If you want to send a set of queries, create a text file and place each query on an individual line in this file. For example:
-
-``` sql
-SELECT * FROM system.numbers LIMIT 10000000
-SELECT 1
-```
-
-Then pass this file to the standard input of `clickhouse-benchmark`.
-
-``` bash
-clickhouse-benchmark [keys] < queries_file
-```
-
-## Keys {#clickhouse-benchmark-keys}
-
-- `-c N`, `--concurrency=N` — Number of queries that `clickhouse-benchmark` sends simultaneously. Default value: 1.
-- `-d N`, `--delay=N` — Interval in seconds between intermediate reports (set 0 to disable reports). Default value: 1.
-- `-h WORD`, `--host=WORD` — Server host. Default value: `localhost`. For the [comparison mode](#clickhouse-benchmark-comparison-mode) you can use multiple `-h` keys.
-- `-p N`, `--port=N` — Server port. Default value: 9000. For the [comparison mode](#clickhouse-benchmark-comparison-mode) you can use multiple `-p` keys.
-- `-i N`, `--iterations=N` — Total number of queries. Default value: 0.
-- `-r`, `--randomize` — Random order of query execution if there is more than one input query.
-- `-s`, `--secure` — Using a TLS connection.
-- `-t N`, `--timelimit=N` — Time limit in seconds. `clickhouse-benchmark` stops sending queries when the specified time limit is reached. Default value: 0 (time limit disabled).
-- `--confidence=N` — Level of confidence for the T-test. Possible values: 0 (80%), 1 (90%), 2 (95%), 3 (98%), 4 (99%), 5 (99.5%). Default value: 5. In the [comparison mode](#clickhouse-benchmark-comparison-mode) `clickhouse-benchmark` performs the [Independent two-sample Student's t-test](https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test) to determine whether the two distributions aren't different with the selected level of confidence.
-- `--cumulative` — Printing cumulative data instead of data per interval.
-- `--database=DATABASE_NAME` — ClickHouse database name. Default value: `default`.
-- `--json=FILEPATH` — JSON output. When the key is set, `clickhouse-benchmark` outputs a report to the specified JSON file.
-- `--user=USERNAME` — ClickHouse user name. Default value: `default`.
-- `--password=PSWD` — ClickHouse user password. Default value: empty string.
-- `--stacktrace` — Stack traces output. When the key is set, `clickhouse-benchmark` outputs stack traces of exceptions.
-- `--stage=WORD` — Query processing stage at server. ClickHouse stops query processing and returns the answer to `clickhouse-benchmark` at the specified stage. Possible values: `complete`, `fetch_columns`, `with_mergeable_state`. Default value: `complete`.
-- `--help` — Shows the help message.
-
-If you want to apply some [settings](../../operations/settings/index.md) for queries, pass them as a key `--<session setting name>=SETTING_VALUE`. For example, `--max_memory_usage=1048576`.
-
-## Output {#clickhouse-benchmark-output}
-
-By default, `clickhouse-benchmark` reports for each `--delay` interval.
-
-Example of the report:
-
-``` text
-Queries executed: 10.
-
-localhost:9000, queries 10, QPS: 6.772, RPS: 67904487.440, MiB/s: 518.070, result RPS: 67721584.984, result MiB/s: 516.675.
-
-0.000%      0.145 sec.
-10.000%     0.146 sec.
-20.000%     0.146 sec.
-30.000%     0.146 sec.
-40.000%     0.147 sec.
-50.000%     0.148 sec.
-60.000%     0.148 sec.
-70.000%     0.148 sec.
-80.000%     0.149 sec.
-90.000%     0.150 sec.
-95.000%     0.150 sec.
-99.000%     0.150 sec.
-99.900%     0.150 sec.
-99.990%     0.150 sec.
-```
-
-In the report you can find:
-
-- Number of queries in the `Queries executed:` field.
-
-- Status string containing (in order):
-
-    - Endpoint of the ClickHouse server.
-    - Number of processed queries.
-    - QPS: How many queries the server performed per second during a period specified in the `--delay` argument.
-    - RPS: How many rows the server read per second during a period specified in the `--delay` argument.
-    - MiB/s: How many mebibytes the server read per second during a period specified in the `--delay` argument.
-    - result RPS: How many rows placed by the server to the result of a query per second during a period specified in the `--delay` argument.
-    - result MiB/s: How many mebibytes placed by the server to the result of a query per second during a period specified in the `--delay` argument.
-
-- Percentiles of query execution time.
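-
-For instance, the keys above combine as in the following invocation, which writes the report to a JSON file (an illustrative sketch; the file name is arbitrary):
-
-``` bash
-$ clickhouse-benchmark -i 10 --json=report.json <<< "SELECT 1"
-```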
-
-## Comparison Mode {#clickhouse-benchmark-comparison-mode}
-
-`clickhouse-benchmark` can compare performances for two running ClickHouse servers.
-
-To use the comparison mode, specify endpoints of both servers by two pairs of `--host` and `--port` keys. Keys are matched together by position in the argument list: the first `--host` is matched with the first `--port`, and so on. `clickhouse-benchmark` establishes connections to both servers, then sends queries. Each query is addressed to a randomly selected server. The results are shown for each server separately.
-
-## Example {#clickhouse-benchmark-example}
-
-``` bash
-$ echo "SELECT * FROM system.numbers LIMIT 10000000 OFFSET 10000000" | clickhouse-benchmark -i 10
-```
-
-``` text
-Loaded 1 queries.
-
-Queries executed: 6.
-
-localhost:9000, queries 6, QPS: 6.153, RPS: 123398340.957, MiB/s: 941.455, result RPS: 61532982.200, result MiB/s: 469.459.
-
-0.000%      0.159 sec.
-10.000%     0.159 sec.
-20.000%     0.159 sec.
-30.000%     0.160 sec.
-40.000%     0.160 sec.
-50.000%     0.162 sec.
-60.000%     0.164 sec.
-70.000%     0.165 sec.
-80.000%     0.166 sec.
-90.000%     0.166 sec.
-95.000%     0.167 sec.
-99.000%     0.167 sec.
-99.900%     0.167 sec.
-99.990%     0.167 sec.
-
-
-
-Queries executed: 10.
-
-localhost:9000, queries 10, QPS: 6.082, RPS: 121959604.568, MiB/s: 930.478, result RPS: 60815551.642, result MiB/s: 463.986.
-
-0.000%      0.159 sec.
-10.000%     0.159 sec.
-20.000%     0.160 sec.
-30.000%     0.163 sec.
-40.000%     0.164 sec.
-50.000%     0.165 sec.
-60.000%     0.166 sec.
-70.000%     0.166 sec.
-80.000%     0.167 sec.
-90.000%     0.167 sec.
-95.000%     0.170 sec.
-99.000%     0.172 sec.
-99.900%     0.172 sec.
-99.990%     0.172 sec.
-```
diff --git a/docs/es/operations/utilities/clickhouse-copier.md b/docs/es/operations/utilities/clickhouse-copier.md
deleted file mode 100644
index 5717ffaa737..00000000000
--- a/docs/es/operations/utilities/clickhouse-copier.md
+++ /dev/null
@@ -1,176 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 59
-toc_title: clickhouse-copier
----
-
-# clickhouse-copier {#clickhouse-copier}
-
-Copies data from the tables in one cluster to tables in another (or the same) cluster.
-
-You can run multiple `clickhouse-copier` instances on different servers to perform the same job. ZooKeeper is used for syncing the processes.
-
-After starting, `clickhouse-copier`:
-
-- Connects to ZooKeeper and receives:
-
-    - Copying jobs.
-    - The state of the copying jobs.
-
-- It performs the jobs.
-
-    Each running process chooses the “closest” shard of the source cluster and copies the data into the destination cluster, resharding the data if necessary.
-
-`clickhouse-copier` tracks the changes in ZooKeeper and applies them on the fly.
-
-To reduce network traffic, we recommend running `clickhouse-copier` on the same server where the source data is located.
-
-## Running Clickhouse-copier {#running-clickhouse-copier}
-
-The utility should be run manually:
-
-``` bash
-$ clickhouse-copier copier --daemon --config zookeeper.xml --task-path /task/path --base-dir /path/to/dir
-```
-
-Parameters:
-
-- `daemon` — Starts `clickhouse-copier` in daemon mode.
-- `config` — The path to the `zookeeper.xml` file with the parameters for the connection to ZooKeeper.
-- `task-path` — The path to the ZooKeeper node. This node is used for syncing `clickhouse-copier` processes and storing tasks. Tasks are stored in `$task-path/description`.
-- `task-file` — Optional path to file with task configuration for initial upload to ZooKeeper.
-- `task-upload-force` — Force upload `task-file` even if the node already exists.
-- `base-dir` — The path to logs and auxiliary files. When it starts, `clickhouse-copier` creates `clickhouse-copier_YYYYMMHHSS_<PID>` subdirectories in `$base-dir`. If this parameter is omitted, the directories are created in the directory where `clickhouse-copier` was launched.
-
-## Format of zookeeper.xml {#format-of-zookeeper-xml}
-
-``` xml
-<yandex>
-    <logger>
-        <level>trace</level>
-        <size>100M</size>
-        <count>3</count>
-    </logger>
-
-    <zookeeper>
-        <node index="1">
-            <host>127.0.0.1</host>
-            <port>2181</port>
-        </node>
-    </zookeeper>
-</yandex>
-```
-
-## Configuration of Copying Tasks {#configuration-of-copying-tasks}
-
-``` xml
-<yandex>
-    <!-- Configuration of clusters as in an ordinary server config -->
-    <remote_servers>
-        <source_cluster>
-            <shard>
-                <internal_replication>false</internal_replication>
-                <replica>
-                    <host>127.0.0.1</host>
-                    <port>9000</port>
-                </replica>
-            </shard>
-            ...
-        </source_cluster>
-
-        <destination_cluster>
-        ...
-        </destination_cluster>
-    </remote_servers>
-
-    <!-- How many simultaneously active workers are possible -->
-    <max_workers>2</max_workers>
-
-    <!-- Settings used to fetch (pull) data from source cluster tables -->
-    <settings_pull>
-        <readonly>1</readonly>
-    </settings_pull>

-    <!-- Settings used to insert (push) data to destination cluster tables -->
-    <settings_push>
-        <readonly>0</readonly>
-    </settings_push>
-
-    <!-- Common settings for fetch (pull) and insert (push) operations;
-         they are overlaid by <settings_pull/> and <settings_push/> respectively -->
-    <settings>
-        <connect_timeout>3</connect_timeout>
-        <!-- Sync insert is set forcibly, leave it here just in case -->
-        <insert_distributed_sync>1</insert_distributed_sync>
-    </settings>
-
-    <!-- Copying task description; several table tasks in the same description
-         are performed sequentially -->
-    <tables>
-        <!-- A table task that copies one table -->
-        <table_hits>
-            <!-- Source cluster name (from the <remote_servers/> section) and tables in it that should be copied -->
-            <cluster_pull>source_cluster</cluster_pull>
-            <database_pull>test</database_pull>
-            <table_pull>hits</table_pull>
-
-            <!-- Destination cluster name and tables in which the data should be inserted -->
-            <cluster_push>destination_cluster</cluster_push>
-            <database_push>test</database_push>
-            <table_push>hits2</table_push>
-
-            <!-- Engine of destination tables -->
-            <engine>
-            ENGINE=ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/hits2', '{replica}')
-            PARTITION BY toMonday(date)
-            ORDER BY (CounterID, EventDate)
-            </engine>
-
-            <!-- Sharding key used to insert data to the destination cluster -->
-            <sharding_key>jumpConsistentHash(intHash64(UserID), 2)</sharding_key>
-
-            <!-- Optional expression that filters data while it is pulled from the source servers -->
-            <where_condition>CounterID != 0</where_condition>
-
-            <!-- Partitions that should be copied; other partitions will be ignored -->
-            <enabled_partitions>
-                <partition>'2018-02-26'</partition>
-                <partition>'2018-03-05'</partition>
-                ...
-            </enabled_partitions>
-        </table_hits>
-
-        <!-- Next table to copy -->
-        <table_visits>
-        ...
-        </table_visits>
-        ...
-    </tables>
-</yandex>
-```
-
-`clickhouse-copier` tracks the changes in `/task/path/description` and applies them on the fly. For instance, if you change the value of `max_workers`, the number of processes running tasks will also change.
-
-[Original article](https://clickhouse.tech/docs/en/operations/utils/clickhouse-copier/)
diff --git a/docs/es/operations/utilities/clickhouse-local.md b/docs/es/operations/utilities/clickhouse-local.md
deleted file mode 100644
index e122f668f53..00000000000
--- a/docs/es/operations/utilities/clickhouse-local.md
+++ /dev/null
@@ -1,81 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 60
-toc_title: clickhouse-local
----
-
-# clickhouse-local {#clickhouse-local}
-
-The `clickhouse-local` program enables you to perform fast processing on local files, without having to deploy and configure the ClickHouse server.
-
-It accepts data that represent tables and queries them using the [ClickHouse SQL dialect](../../sql-reference/index.md).
-
-`clickhouse-local` uses the same core as the ClickHouse server, so it supports most of the features and the same set of formats and table engines.
-
-By default `clickhouse-local` does not have access to data on the same host, but it supports loading server configuration using the `--config-file` argument.
-
-!!! warning "Warning"
-    It is not recommended to load production server configuration into `clickhouse-local` because data can be damaged in case of human error.
-
-## Usage {#usage}
-
-Basic usage:
-
-``` bash
-$ clickhouse-local --structure "table_structure" --input-format "format_of_incoming_data" -q "query"
-```
-
-Arguments:
-
-- `-S`, `--structure` — table structure for input data.
-- `-if`, `--input-format` — input format, `TSV` by default.
-- `-f`, `--file` — path to data, `stdin` by default.
-- `-q` `--query` — queries to execute with `;` as delimiter.
-- `-N`, `--table` — table name where to put output data, `table` by default.
-- `-of`, `--format`, `--output-format` — output format, `TSV` por defecto.
-- `--stacktrace` — whether to dump debug output in case of exception.
-- `--verbose` — more details on query execution.
-- `-s` — disables `stderr` logging.
-- `--config-file` — path to configuration file in same format as for ClickHouse server; by default the configuration is empty.
-- `--help` — arguments reference for `clickhouse-local`.
-
-También hay argumentos para cada variable de configuración de ClickHouse, que se usan más comúnmente en lugar de `--config-file`.
-
-## Ejemplos {#examples}
-
-``` bash
-$ echo -e "1,2\n3,4" | clickhouse-local -S "a Int64, b Int64" -if "CSV" -q "SELECT * FROM table"
-Read 2 rows, 32.00 B in 0.000 sec., 5182 rows/sec., 80.97 KiB/sec.
-1 2
-3 4
-```
-
-El ejemplo anterior es equivalente a:
-
-``` bash
-$ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table"
-Read 2 rows, 32.00 B in 0.000 sec., 4987 rows/sec., 77.93 KiB/sec.
-1 2
-3 4
-```
-
-Ahora mostremos el uso de memoria de cada usuario de Unix:
-
-``` bash
-$ ps aux | tail -n +2 | awk '{ printf("%s\t%s\n", $1, $4) }' | clickhouse-local -S "user String, mem Float64" -q "SELECT user, round(sum(mem), 2) as memTotal FROM table GROUP BY user ORDER BY memTotal DESC FORMAT Pretty"
-```
-
-``` text
-Read 186 rows, 4.15 KiB in 0.035 sec., 5302 rows/sec., 118.34 KiB/sec.
-┏━━━━━━━━━━┳━━━━━━━━━━┓
-┃ user     ┃ memTotal ┃
-┡━━━━━━━━━━╇━━━━━━━━━━┩
-│ bayonet  │    113.5 │
-├──────────┼──────────┤
-│ root     │      8.8 │
-├──────────┼──────────┤
-...
-```
-
-[Artículo Original](https://clickhouse.tech/docs/en/operations/utils/clickhouse-local/)
diff --git a/docs/es/operations/utilities/index.md b/docs/es/operations/utilities/index.md
deleted file mode 100644
index a69397a326c..00000000000
--- a/docs/es/operations/utilities/index.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_folder_title: Utilidades
-toc_priority: 56
-toc_title: "Descripci\xF3n"
----
-
-# Utilidades de ClickHouse {#clickhouse-utility}
-
-- [clickhouse-local](clickhouse-local.md#clickhouse-local) — Permite ejecutar consultas SQL sobre los datos sin detener el servidor ClickHouse, de forma similar a como lo hace `awk`.
-- [clickhouse-copier](clickhouse-copier.md) — Copia (y re-particiona) datos de un clúster a otro.
-- [clickhouse-benchmark](clickhouse-benchmark.md) — Carga el servidor con consultas y configuraciones personalizadas.
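-
-A modo de ilustración, una invocación mínima de cada utilidad (boceto orientativo tomado de los ejemplos de las páginas enlazadas; las rutas y los parámetros concretos son suposiciones):
-
-``` bash
-# clickhouse-local: consultas SQL sobre datos locales, sin servidor
-$ echo -e "1,2\n3,4" | clickhouse-local -S "a Int64, b Int64" -if "CSV" -q "SELECT sum(a) FROM table"
-
-# clickhouse-copier: requiere ZooKeeper y una tarea de copia descrita en XML
-$ clickhouse-copier --daemon --config zookeeper.xml --task-path /task/path --base-dir /path/to/dir
-
-# clickhouse-benchmark: 10 iteraciones de una consulta leída de stdin
-$ echo "SELECT * FROM system.numbers LIMIT 10000000" | clickhouse-benchmark -i 10
-```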
-
-[Artículo Original](https://clickhouse.tech/docs/en/operations/utils/)
diff --git a/docs/es/roadmap.md b/docs/es/roadmap.md
deleted file mode 100644
index 60db1c608df..00000000000
--- a/docs/es/roadmap.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-machine_translated: true
----
-
-# Hoja de ruta {#roadmap}
-
-## Q1 2020 {#q1-2020}
-
-- Control de acceso basado en roles
-
-## Q2 2020 {#q2-2020}
-
-- Integración con servicios de autenticación externos
-- Grupos de recursos para una distribución más precisa de la capacidad del clúster entre los usuarios
-
-{## [Artículo Original](https://clickhouse.tech/docs/es/roadmap/) ##}
diff --git a/docs/es/sql-reference/aggregate-functions/combinators.md b/docs/es/sql-reference/aggregate-functions/combinators.md
deleted file mode 100644
index c9fdcb9478f..00000000000
--- a/docs/es/sql-reference/aggregate-functions/combinators.md
+++ /dev/null
@@ -1,245 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 37
-toc_title: Combinadores
----
-
-# Combinadores de funciones agregadas {#aggregate_functions_combinators}
-
-Al nombre de una función agregada se le puede anexar un sufijo. Esto cambia la forma en que funciona la función agregada.
-
-## -If {#agg-functions-combinator-if}
-
-El sufijo -If se puede anexar al nombre de cualquier función agregada. En ese caso, la función agregada acepta un argumento adicional: una condición (tipo UInt8). La función agregada procesa solo las filas que cumplen la condición. Si la condición no se cumplió ni una sola vez, devuelve un valor predeterminado (normalmente ceros o cadenas vacías).
-
-Ejemplos: `sumIf(column, cond)`, `countIf(cond)`, `avgIf(x, cond)`, `quantilesTimingIf(level1, level2)(x, cond)`, `argMinIf(arg, val, cond)` y así sucesivamente.
-
-Con las funciones agregadas condicionales, puede calcular agregados para varias condiciones a la vez, sin utilizar subconsultas ni `JOIN`. Por ejemplo, en Yandex.Metrica, las funciones agregadas condicionales se utilizan para implementar la funcionalidad de comparación de segmentos.
-
-## -Array {#agg-functions-combinator-array}
-
-El sufijo -Array se puede añadir a cualquier función agregada. En este caso, la función agregada toma argumentos de tipo ‘Array(T)’ (matrices) en lugar de argumentos de tipo ‘T’. Si la función agregada acepta varios argumentos, deben ser matrices de igual longitud. Al procesar matrices, la función agregada funciona como la función agregada original sobre todos los elementos de las matrices.
-
-Ejemplo 1: `sumArray(arr)` - Suma todos los elementos de todas las matrices ‘arr’. En este ejemplo, podría haberse escrito más simplemente: `sum(arraySum(arr))`.
-
-Ejemplo 2: `uniqArray(arr)` – Cuenta el número de elementos únicos en todas las matrices ‘arr’. Esto podría hacerse de una manera más sencilla: `uniq(arrayJoin(arr))`, pero no siempre es posible añadir ‘arrayJoin’ a una consulta.
-
--If y -Array se pueden combinar. No obstante, ‘Array’ debe ir primero y después ‘If’. Ejemplos: `uniqArrayIf(arr, cond)`, `quantilesTimingArrayIf(level1, level2)(arr, cond)`. Debido a este orden, el argumento ‘cond’ no será una matriz.
-
-## -State {#agg-functions-combinator-state}
-
-Si aplica este combinador, la función agregada no devuelve el valor resultante (como el número de valores únicos para la función [uniq](reference.md#agg_function-uniq)), sino un estado intermedio de la agregación (para `uniq`, esta es la tabla hash para calcular el número de valores únicos).
Este es un `AggregateFunction(...)` que puede ser utilizado para su posterior procesamiento o almacenado en una tabla para terminar de agregar más tarde. - -Para trabajar con estos estados, use: - -- [AgregaciónMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md) motor de mesa. -- [finalizeAggregation](../../sql-reference/functions/other-functions.md#function-finalizeaggregation) función. -- [runningAccumulate](../../sql-reference/functions/other-functions.md#function-runningaccumulate) función. -- [-Fusionar](#aggregate_functions_combinators-merge) combinador. -- [-MergeState](#aggregate_functions_combinators-mergestate) combinador. - -## -Fusionar {#aggregate_functions_combinators-merge} - -Si aplica este combinador, la función de agregado toma el estado de agregación intermedio como argumento, combina los estados para finalizar la agregación y devuelve el valor resultante. - -## -MergeState {#aggregate_functions_combinators-mergestate} - -Combina los estados de agregación intermedios de la misma manera que el combinador -Merge. Sin embargo, no devuelve el valor resultante, sino un estado de agregación intermedio, similar al combinador -State. - -## -ForEach {#agg-functions-combinator-foreach} - -Convierte una función de agregado para tablas en una función de agregado para matrices que agrega los elementos de matriz correspondientes y devuelve una matriz de resultados. Por ejemplo, `sumForEach` para las matrices `[1, 2]`, `[3, 4, 5]`y`[6, 7]`devuelve el resultado `[10, 13, 5]` después de agregar los elementos de la matriz correspondientes. - -## -OPor defecto {#agg-functions-combinator-ordefault} - -Cambia el comportamiento de una función agregada. - -Si una función agregada no tiene valores de entrada, con este combinador devuelve el valor predeterminado para su tipo de datos de retorno. Se aplica a las funciones agregadas que pueden tomar datos de entrada vacíos. - -`-OrDefault` se puede utilizar con otros combinadores. - -**Sintaxis** - -``` sql -OrDefault(x) -``` - -**Parámetros** - -- `x` — Aggregate function parameters. - -**Valores devueltos** - -Devuelve el valor predeterminado del tipo devuelto de una función de agregado si no hay nada que agregar. - -El tipo depende de la función de agregado utilizada. - -**Ejemplo** - -Consulta: - -``` sql -SELECT avg(number), avgOrDefault(number) FROM numbers(0) -``` - -Resultado: - -``` text -┌─avg(number)─┬─avgOrDefault(number)─┐ -│ nan │ 0 │ -└─────────────┴──────────────────────┘ -``` - -También `-OrDefault` se puede utilizar con otros combinadores. Es útil cuando la función de agregado no acepta la entrada vacía. - -Consulta: - -``` sql -SELECT avgOrDefaultIf(x, x > 10) -FROM -( - SELECT toDecimal32(1.23, 2) AS x -) -``` - -Resultado: - -``` text -┌─avgOrDefaultIf(x, greater(x, 10))─┐ -│ 0.00 │ -└───────────────────────────────────┘ -``` - -## -OrNull {#agg-functions-combinator-ornull} - -Cambia el comportamiento de una función agregada. - -Este combinador convierte un resultado de una función agregada en [NULL](../data-types/nullable.md) tipo de datos. Si la función de agregado no tiene valores para calcular devuelve [NULL](../syntax.md#null-literal). - -`-OrNull` se puede utilizar con otros combinadores. - -**Sintaxis** - -``` sql -OrNull(x) -``` - -**Parámetros** - -- `x` — Aggregate function parameters. - -**Valores devueltos** - -- El resultado de la función de agregado, convertida a la `Nullable` tipo de datos. -- `NULL`, si no hay nada que agregar. - -Tipo: `Nullable(aggregate function return type)`. 
-
-**Ejemplo**
-
-Añada `-OrNull` al final de la función agregada.
-
-Consulta:
-
-``` sql
-SELECT sumOrNull(number), toTypeName(sumOrNull(number)) FROM numbers(10) WHERE number > 10
-```
-
-Resultado:
-
-``` text
-┌─sumOrNull(number)─┬─toTypeName(sumOrNull(number))─┐
-│              ᴺᵁᴸᴸ │ Nullable(UInt64)              │
-└───────────────────┴───────────────────────────────┘
-```
-
-También `-OrNull` se puede utilizar con otros combinadores. Es útil cuando la función agregada no acepta la entrada vacía.
-
-Consulta:
-
-``` sql
-SELECT avgOrNullIf(x, x > 10)
-FROM
-(
-    SELECT toDecimal32(1.23, 2) AS x
-)
-```
-
-Resultado:
-
-``` text
-┌─avgOrNullIf(x, greater(x, 10))─┐
-│                           ᴺᵁᴸᴸ │
-└────────────────────────────────┘
-```
-
-## -Resample {#agg-functions-combinator-resample}
-
-Permite dividir los datos en grupos y, a continuación, agregar por separado los datos de esos grupos. Los grupos se crean dividiendo los valores de una columna en intervalos.
-
-``` sql
-<aggFunction>Resample(start, end, step)(<aggFunction_params>, resampling_key)
-```
-
-**Parámetros**
-
-- `start` — Valor inicial de todo el intervalo requerido para los valores de `resampling_key`.
-- `stop` — Valor final de todo el intervalo requerido para los valores de `resampling_key` (`end` en la sintaxis anterior). Todo el intervalo no incluye el valor `stop`: `[start, stop)`.
-- `step` — Paso para separar el intervalo completo en subintervalos. La `aggFunction` se ejecuta sobre cada uno de esos subintervalos de forma independiente.
-- `resampling_key` — Columna cuyos valores se utilizan para separar los datos en intervalos.
-- `aggFunction_params` — Parámetros de `aggFunction`.
-
-**Valores devueltos**
-
-- Matriz de resultados de `aggFunction` para cada subintervalo.
-
-**Ejemplo**
-
-Considere la tabla `people` con los siguientes datos:
-
-``` text
-┌─name───┬─age─┬─wage─┐
-│ John   │  16 │   10 │
-│ Alice  │  30 │   15 │
-│ Mary   │  35 │    8 │
-│ Evelyn │  48 │ 11.5 │
-│ David  │  62 │  9.9 │
-│ Brian  │  60 │   16 │
-└────────┴─────┴──────┘
-```
-
-Obtengamos los nombres de las personas cuya edad se encuentra en los intervalos `[30,60)` y `[60,75)`. Como usamos la representación entera para la edad, obtenemos edades en los intervalos `[30, 59]` y `[60,74]`.
-
-Para agregar nombres en una matriz, usamos la función agregada [groupArray](reference.md#agg_function-grouparray). Acepta un argumento; en nuestro caso, la columna `name`. La función `groupArrayResample` debe utilizar la columna `age` para agregar nombres por edad. Para definir los intervalos requeridos, pasamos los argumentos `30, 75, 30` a la función `groupArrayResample`.
-
-``` sql
-SELECT groupArrayResample(30, 75, 30)(name, age) FROM people
-```
-
-``` text
-┌─groupArrayResample(30, 75, 30)(name, age)─────┐
-│ [['Alice','Mary','Evelyn'],['David','Brian']] │
-└───────────────────────────────────────────────┘
-```
-
-Considere los resultados.
-
-`John` está fuera de la muestra porque es demasiado joven. Las demás personas se distribuyen de acuerdo con los intervalos de edad especificados.
-
-Ahora vamos a contar el número total de personas y su salario promedio en los intervalos de edad especificados.
- -``` sql -SELECT - countResample(30, 75, 30)(name, age) AS amount, - avgResample(30, 75, 30)(wage, age) AS avg_wage -FROM people -``` - -``` text -┌─amount─┬─avg_wage──────────────────┐ -│ [3,2] │ [11.5,12.949999809265137] │ -└────────┴───────────────────────────┘ -``` - -[Artículo Original](https://clickhouse.tech/docs/en/query_language/agg_functions/combinators/) diff --git a/docs/es/sql-reference/aggregate-functions/index.md b/docs/es/sql-reference/aggregate-functions/index.md deleted file mode 100644 index 7c7d58d5f94..00000000000 --- a/docs/es/sql-reference/aggregate-functions/index.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_folder_title: Funciones agregadas -toc_priority: 33 -toc_title: "Implantaci\xF3n" ---- - -# Funciones agregadas {#aggregate-functions} - -Las funciones agregadas funcionan en el [normal](http://www.sql-tutorial.com/sql-aggregate-functions-sql-tutorial) forma esperada por los expertos en bases de datos. - -ClickHouse también es compatible: - -- [Funciones agregadas paramétricas](parametric-functions.md#aggregate_functions_parametric) que aceptan otros parámetros además de las columnas. -- [Combinadores](combinators.md#aggregate_functions_combinators), que cambian el comportamiento de las funciones agregadas. - -## Procesamiento NULL {#null-processing} - -Durante la agregación, todos `NULL`s se omiten. - -**Ejemplos:** - -Considere esta tabla: - -``` text -┌─x─┬────y─┐ -│ 1 │ 2 │ -│ 2 │ ᴺᵁᴸᴸ │ -│ 3 │ 2 │ -│ 3 │ 3 │ -│ 3 │ ᴺᵁᴸᴸ │ -└───┴──────┘ -``` - -Digamos que necesita sumar los valores en el `y` columna: - -``` sql -SELECT sum(y) FROM t_null_big -``` - - ┌─sum(y)─┐ - │ 7 │ - └────────┘ - -El `sum` función interpreta `NULL` como `0`. En particular, esto significa que si la función recibe la entrada de una selección donde todos los valores son `NULL`, entonces el resultado será `0`, ni `NULL`. - -Ahora puedes usar el `groupArray` función para crear una matriz a partir de la `y` columna: - -``` sql -SELECT groupArray(y) FROM t_null_big -``` - -``` text -┌─groupArray(y)─┐ -│ [2,2,3] │ -└───────────────┘ -``` - -`groupArray` no incluye `NULL` en la matriz resultante. - -[Artículo Original](https://clickhouse.tech/docs/en/query_language/agg_functions/) diff --git a/docs/es/sql-reference/aggregate-functions/parametric-functions.md b/docs/es/sql-reference/aggregate-functions/parametric-functions.md deleted file mode 100644 index ea32920401b..00000000000 --- a/docs/es/sql-reference/aggregate-functions/parametric-functions.md +++ /dev/null @@ -1,499 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 38 -toc_title: "Param\xE9trico" ---- - -# Funciones agregadas paramétricas {#aggregate_functions_parametric} - -Some aggregate functions can accept not only argument columns (used for compression), but a set of parameters – constants for initialization. The syntax is two pairs of brackets instead of one. The first is for parameters, and the second is for arguments. - -## histograma {#histogram} - -Calcula un histograma adaptativo. No garantiza resultados precisos. - -``` sql -histogram(number_of_bins)(values) -``` - -Las funciones utiliza [Un algoritmo de árbol de decisión paralelo de transmisión](http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf). Los bordes de los contenedores de histograma se ajustan a medida que los nuevos datos entran en una función. 
En caso común, los anchos de los contenedores no son iguales. - -**Parámetros** - -`number_of_bins` — Upper limit for the number of bins in the histogram. The function automatically calculates the number of bins. It tries to reach the specified number of bins, but if it fails, it uses fewer bins. -`values` — [Expresion](../syntax.md#syntax-expressions) resultando en valores de entrada. - -**Valores devueltos** - -- [Matriz](../../sql-reference/data-types/array.md) de [Tuples](../../sql-reference/data-types/tuple.md) del siguiente formato: - - ``` - [(lower_1, upper_1, height_1), ... (lower_N, upper_N, height_N)] - ``` - - - `lower` — Lower bound of the bin. - - `upper` — Upper bound of the bin. - - `height` — Calculated height of the bin. - -**Ejemplo** - -``` sql -SELECT histogram(5)(number + 1) -FROM ( - SELECT * - FROM system.numbers - LIMIT 20 -) -``` - -``` text -┌─histogram(5)(plus(number, 1))───────────────────────────────────────────┐ -│ [(1,4.5,4),(4.5,8.5,4),(8.5,12.75,4.125),(12.75,17,4.625),(17,20,3.25)] │ -└─────────────────────────────────────────────────────────────────────────┘ -``` - -Puede visualizar un histograma con el [Bar](../../sql-reference/functions/other-functions.md#function-bar) función, por ejemplo: - -``` sql -WITH histogram(5)(rand() % 100) AS hist -SELECT - arrayJoin(hist).3 AS height, - bar(height, 0, 6, 5) AS bar -FROM -( - SELECT * - FROM system.numbers - LIMIT 20 -) -``` - -``` text -┌─height─┬─bar───┐ -│ 2.125 │ █▋ │ -│ 3.25 │ ██▌ │ -│ 5.625 │ ████▏ │ -│ 5.625 │ ████▏ │ -│ 3.375 │ ██▌ │ -└────────┴───────┘ -``` - -En este caso, debe recordar que no conoce los bordes del contenedor del histograma. - -## sequenceMatch(pattern)(timestamp, cond1, cond2, …) {#function-sequencematch} - -Comprueba si la secuencia contiene una cadena de eventos que coincida con el patrón. - -``` sql -sequenceMatch(pattern)(timestamp, cond1, cond2, ...) -``` - -!!! warning "Advertencia" - Los eventos que ocurren en el mismo segundo pueden estar en la secuencia en un orden indefinido que afecta el resultado. - -**Parámetros** - -- `pattern` — Pattern string. See [Sintaxis de patrón](#sequence-function-pattern-syntax). - -- `timestamp` — Column considered to contain time data. Typical data types are `Date` y `DateTime`. También puede utilizar cualquiera de los [UInt](../../sql-reference/data-types/int-uint.md) tipos de datos. - -- `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. Puede pasar hasta 32 argumentos de condición. La función sólo tiene en cuenta los eventos descritos en estas condiciones. Si la secuencia contiene datos que no se describen en una condición, la función los omite. - -**Valores devueltos** - -- 1, si el patrón coincide. -- 0, si el patrón no coincide. - -Tipo: `UInt8`. - - -**Sintaxis de patrón** - -- `(?N)` — Matches the condition argument at position `N`. Las condiciones están numeradas en el `[1, 32]` gama. Por ejemplo, `(?1)` coincide con el argumento pasado al `cond1` parámetro. - -- `.*` — Matches any number of events. You don't need conditional arguments to match this element of the pattern. - -- `(?t operator value)` — Sets the time in seconds that should separate two events. For example, pattern `(?1)(?t>1800)(?2)` coincide con los eventos que ocurren a más de 1800 segundos el uno del otro. Un número arbitrario de cualquier evento puede estar entre estos eventos. Puede usar el `>=`, `>`, `<`, `<=` operador. 
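-
-    Por ejemplo, el siguiente boceto (orientativo; usa la tabla `t` de los ejemplos de más abajo) comprueba si el evento 2 ocurre menos de una hora después del evento 1:
-
-    ``` sql
-    SELECT sequenceMatch('(?1)(?t<3600)(?2)')(time, number = 1, number = 2) FROM t
-    ```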
- -**Ejemplos** - -Considere los datos en el `t` tabla: - -``` text -┌─time─┬─number─┐ -│ 1 │ 1 │ -│ 2 │ 3 │ -│ 3 │ 2 │ -└──────┴────────┘ -``` - -Realizar la consulta: - -``` sql -SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2) FROM t -``` - -``` text -┌─sequenceMatch('(?1)(?2)')(time, equals(number, 1), equals(number, 2))─┐ -│ 1 │ -└───────────────────────────────────────────────────────────────────────┘ -``` - -La función encontró la cadena de eventos donde el número 2 sigue al número 1. Se saltó el número 3 entre ellos, porque el número no se describe como un evento. Si queremos tener en cuenta este número al buscar la cadena de eventos dada en el ejemplo, debemos establecer una condición para ello. - -``` sql -SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 3) FROM t -``` - -``` text -┌─sequenceMatch('(?1)(?2)')(time, equals(number, 1), equals(number, 2), equals(number, 3))─┐ -│ 0 │ -└──────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -En este caso, la función no pudo encontrar la cadena de eventos que coincida con el patrón, porque el evento para el número 3 ocurrió entre 1 y 2. Si en el mismo caso comprobamos la condición para el número 4, la secuencia coincidiría con el patrón. - -``` sql -SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 4) FROM t -``` - -``` text -┌─sequenceMatch('(?1)(?2)')(time, equals(number, 1), equals(number, 2), equals(number, 4))─┐ -│ 1 │ -└──────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -**Ver también** - -- [sequenceCount](#function-sequencecount) - -## sequenceCount(pattern)(time, cond1, cond2, …) {#function-sequencecount} - -Cuenta el número de cadenas de eventos que coinciden con el patrón. La función busca cadenas de eventos que no se superponen. Comienza a buscar la siguiente cadena después de que se haga coincidir la cadena actual. - -!!! warning "Advertencia" - Los eventos que ocurren en el mismo segundo pueden estar en la secuencia en un orden indefinido que afecta el resultado. - -``` sql -sequenceCount(pattern)(timestamp, cond1, cond2, ...) -``` - -**Parámetros** - -- `pattern` — Pattern string. See [Sintaxis de patrón](#sequence-function-pattern-syntax). - -- `timestamp` — Column considered to contain time data. Typical data types are `Date` y `DateTime`. También puede utilizar cualquiera de los [UInt](../../sql-reference/data-types/int-uint.md) tipos de datos. - -- `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. Puede pasar hasta 32 argumentos de condición. La función sólo tiene en cuenta los eventos descritos en estas condiciones. Si la secuencia contiene datos que no se describen en una condición, la función los omite. - -**Valores devueltos** - -- Número de cadenas de eventos no superpuestas que coinciden. - -Tipo: `UInt64`. 
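-
-La misma [sintaxis de patrón](#sequence-function-pattern-syntax) de `sequenceMatch` se aplica aquí, incluido el operador de tiempo `(?t operator value)`. Por ejemplo, un boceto orientativo (usa la tabla `t` del ejemplo siguiente) que cuenta las cadenas en las que el evento 2 ocurre menos de 10 segundos después del evento 1:
-
-``` sql
-SELECT sequenceCount('(?1)(?t<10)(?2)')(time, number = 1, number = 2) FROM t
-```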
-
-**Ejemplo**
-
-Considere los datos en la tabla `t`:
-
-``` text
-┌─time─┬─number─┐
-│    1 │      1 │
-│    2 │      3 │
-│    3 │      2 │
-│    4 │      1 │
-│    5 │      3 │
-│    6 │      2 │
-└──────┴────────┘
-```
-
-Cuente cuántas veces aparece el número 2 después del número 1, con cualquier cantidad de otros números entre ellos:
-
-``` sql
-SELECT sequenceCount('(?1).*(?2)')(time, number = 1, number = 2) FROM t
-```
-
-``` text
-┌─sequenceCount('(?1).*(?2)')(time, equals(number, 1), equals(number, 2))─┐
-│                                                                       2 │
-└─────────────────────────────────────────────────────────────────────────┘
-```
-
-**Ver también**
-
-- [sequenceMatch](#function-sequencematch)
-
-## windowFunnel {#windowfunnel}
-
-Busca cadenas de eventos en una ventana de tiempo deslizante y calcula el número máximo de eventos que ocurrieron dentro de la cadena.
-
-La función funciona de acuerdo con el siguiente algoritmo:
-
-- La función busca datos que desencadenen la primera condición de la cadena y establece el contador de eventos en 1. Este es el momento en que comienza la ventana deslizante.
-
-- Si los eventos de la cadena ocurren secuencialmente dentro de la ventana, el contador se incrementa. Si se interrumpe la secuencia de eventos, el contador no se incrementa.
-
-- Si los datos tienen varias cadenas de eventos con distintos puntos de finalización, la función solo devuelve el tamaño de la cadena más larga.
-
-**Sintaxis**
-
-``` sql
-windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN)
-```
-
-**Parámetros**
-
-- `window` — Longitud de la ventana deslizante en segundos.
-- `mode` — Argumento opcional.
-    - `'strict'` — Cuando se establece `'strict'`, windowFunnel() aplica las condiciones solo a los valores únicos.
-- `timestamp` — Nombre de la columna que contiene la marca de tiempo. Tipos de datos admitidos: [Fecha](../../sql-reference/data-types/date.md), [FechaHora](../../sql-reference/data-types/datetime.md#data_type-datetime) y otros tipos de enteros sin signo (tenga en cuenta que, aunque timestamp admite el tipo `UInt64`, su valor no puede exceder el máximo de Int64, que es 2^63 - 1).
-- `cond` — Condiciones o datos que describen la cadena de eventos. [UInt8](../../sql-reference/data-types/int-uint.md).
-
-**Valor devuelto**
-
-El número máximo de condiciones desencadenadas consecutivamente de la cadena dentro de la ventana de tiempo deslizante.
-Se analizan todas las cadenas de la selección.
-
-Tipo: `Integer`.
-
-**Ejemplo**
-
-Determine si un período de tiempo establecido es suficiente para que el usuario seleccione un teléfono y lo compre dos veces en la tienda en línea.
-
-Establezca la siguiente cadena de eventos:
-
-1. El usuario inició sesión en su cuenta en la tienda (`eventID = 1003`).
-2. El usuario buscó un teléfono (`eventID = 1007, product = 'phone'`).
-3. El usuario realizó un pedido (`eventID = 1009`).
-4. El usuario volvió a realizar un pedido (`eventID = 1010`).
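-
-Para reproducir el ejemplo, la tabla de origen podría crearse, por ejemplo, así (DDL orientativo: el motor `MergeTree` y la clave de ordenación son suposiciones nuestras, no forman parte del ejemplo original):
-
-``` sql
-CREATE TABLE trend
-(
-    event_date Date,
-    user_id UInt64,
-    timestamp DateTime,
-    eventID UInt16,
-    product String
-)
-ENGINE = MergeTree()
-ORDER BY (user_id, timestamp);
-```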
- -Tabla de entrada: - -``` text -┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐ -│ 2019-01-28 │ 1 │ 2019-01-29 10:00:00 │ 1003 │ phone │ -└────────────┴─────────┴─────────────────────┴─────────┴─────────┘ -┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐ -│ 2019-01-31 │ 1 │ 2019-01-31 09:00:00 │ 1007 │ phone │ -└────────────┴─────────┴─────────────────────┴─────────┴─────────┘ -┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐ -│ 2019-01-30 │ 1 │ 2019-01-30 08:00:00 │ 1009 │ phone │ -└────────────┴─────────┴─────────────────────┴─────────┴─────────┘ -┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐ -│ 2019-02-01 │ 1 │ 2019-02-01 08:00:00 │ 1010 │ phone │ -└────────────┴─────────┴─────────────────────┴─────────┴─────────┘ -``` - -Averigüe hasta qué punto el usuario `user_id` podría atravesar la cadena en un período de enero a febrero de 2019. - -Consulta: - -``` sql -SELECT - level, - count() AS c -FROM -( - SELECT - user_id, - windowFunnel(6048000000000000)(timestamp, eventID = 1003, eventID = 1009, eventID = 1007, eventID = 1010) AS level - FROM trend - WHERE (event_date >= '2019-01-01') AND (event_date <= '2019-02-02') - GROUP BY user_id -) -GROUP BY level -ORDER BY level ASC -``` - -Resultado: - -``` text -┌─level─┬─c─┐ -│ 4 │ 1 │ -└───────┴───┘ -``` - -## retención {#retention} - -La función toma como argumentos un conjunto de condiciones de 1 a 32 argumentos de tipo `UInt8` que indican si se cumplió una determinada condición para el evento. -Cualquier condición se puede especificar como un argumento (como en [WHERE](../../sql-reference/statements/select/where.md#select-where)). - -Las condiciones, excepto la primera, se aplican en pares: el resultado del segundo será verdadero si el primero y el segundo son verdaderos, del tercero si el primero y el fird son verdaderos, etc. - -**Sintaxis** - -``` sql -retention(cond1, cond2, ..., cond32); -``` - -**Parámetros** - -- `cond` — an expression that returns a `UInt8` resultado (1 o 0). - -**Valor devuelto** - -La matriz de 1 o 0. - -- 1 — condition was met for the event. -- 0 — condition wasn't met for the event. - -Tipo: `UInt8`. - -**Ejemplo** - -Consideremos un ejemplo de cálculo del `retention` función para determinar el tráfico del sitio. - -**1.** Сreate a table to illustrate an example. 
- -``` sql -CREATE TABLE retention_test(date Date, uid Int32) ENGINE = Memory; - -INSERT INTO retention_test SELECT '2020-01-01', number FROM numbers(5); -INSERT INTO retention_test SELECT '2020-01-02', number FROM numbers(10); -INSERT INTO retention_test SELECT '2020-01-03', number FROM numbers(15); -``` - -Tabla de entrada: - -Consulta: - -``` sql -SELECT * FROM retention_test -``` - -Resultado: - -``` text -┌───────date─┬─uid─┐ -│ 2020-01-01 │ 0 │ -│ 2020-01-01 │ 1 │ -│ 2020-01-01 │ 2 │ -│ 2020-01-01 │ 3 │ -│ 2020-01-01 │ 4 │ -└────────────┴─────┘ -┌───────date─┬─uid─┐ -│ 2020-01-02 │ 0 │ -│ 2020-01-02 │ 1 │ -│ 2020-01-02 │ 2 │ -│ 2020-01-02 │ 3 │ -│ 2020-01-02 │ 4 │ -│ 2020-01-02 │ 5 │ -│ 2020-01-02 │ 6 │ -│ 2020-01-02 │ 7 │ -│ 2020-01-02 │ 8 │ -│ 2020-01-02 │ 9 │ -└────────────┴─────┘ -┌───────date─┬─uid─┐ -│ 2020-01-03 │ 0 │ -│ 2020-01-03 │ 1 │ -│ 2020-01-03 │ 2 │ -│ 2020-01-03 │ 3 │ -│ 2020-01-03 │ 4 │ -│ 2020-01-03 │ 5 │ -│ 2020-01-03 │ 6 │ -│ 2020-01-03 │ 7 │ -│ 2020-01-03 │ 8 │ -│ 2020-01-03 │ 9 │ -│ 2020-01-03 │ 10 │ -│ 2020-01-03 │ 11 │ -│ 2020-01-03 │ 12 │ -│ 2020-01-03 │ 13 │ -│ 2020-01-03 │ 14 │ -└────────────┴─────┘ -``` - -**2.** Agrupar usuarios por ID único `uid` utilizando el `retention` función. - -Consulta: - -``` sql -SELECT - uid, - retention(date = '2020-01-01', date = '2020-01-02', date = '2020-01-03') AS r -FROM retention_test -WHERE date IN ('2020-01-01', '2020-01-02', '2020-01-03') -GROUP BY uid -ORDER BY uid ASC -``` - -Resultado: - -``` text -┌─uid─┬─r───────┐ -│ 0 │ [1,1,1] │ -│ 1 │ [1,1,1] │ -│ 2 │ [1,1,1] │ -│ 3 │ [1,1,1] │ -│ 4 │ [1,1,1] │ -│ 5 │ [0,0,0] │ -│ 6 │ [0,0,0] │ -│ 7 │ [0,0,0] │ -│ 8 │ [0,0,0] │ -│ 9 │ [0,0,0] │ -│ 10 │ [0,0,0] │ -│ 11 │ [0,0,0] │ -│ 12 │ [0,0,0] │ -│ 13 │ [0,0,0] │ -│ 14 │ [0,0,0] │ -└─────┴─────────┘ -``` - -**3.** Calcule el número total de visitas al sitio por día. - -Consulta: - -``` sql -SELECT - sum(r[1]) AS r1, - sum(r[2]) AS r2, - sum(r[3]) AS r3 -FROM -( - SELECT - uid, - retention(date = '2020-01-01', date = '2020-01-02', date = '2020-01-03') AS r - FROM retention_test - WHERE date IN ('2020-01-01', '2020-01-02', '2020-01-03') - GROUP BY uid -) -``` - -Resultado: - -``` text -┌─r1─┬─r2─┬─r3─┐ -│ 5 │ 5 │ 5 │ -└────┴────┴────┘ -``` - -Donde: - -- `r1`- el número de visitantes únicos que visitaron el sitio durante 2020-01-01 (la `cond1` condición). -- `r2`- el número de visitantes únicos que visitaron el sitio durante un período de tiempo específico entre 2020-01-01 y 2020-01-02 (`cond1` y `cond2` condición). -- `r3`- el número de visitantes únicos que visitaron el sitio durante un período de tiempo específico entre 2020-01-01 y 2020-01-03 (`cond1` y `cond3` condición). - -## UniqUpTo(N)(x) {#uniquptonx} - -Calculates the number of different argument values ​​if it is less than or equal to N. If the number of different argument values is greater than N, it returns N + 1. - -Recomendado para usar con Ns pequeños, hasta 10. El valor máximo de N es 100. - -Para el estado de una función agregada, utiliza la cantidad de memoria igual a 1 + N \* el tamaño de un valor de bytes. -Para las cadenas, almacena un hash no criptográfico de 8 bytes. Es decir, el cálculo se aproxima a las cadenas. - -La función también funciona para varios argumentos. - -Funciona lo más rápido posible, excepto en los casos en que se usa un valor N grande y el número de valores únicos es ligeramente menor que N. - -Ejemplo de uso: - -``` text -Problem: Generate a report that shows only keywords that produced at least 5 unique users. 
-Solution: Write in the GROUP BY query SearchPhrase HAVING uniqUpTo(4)(UserID) >= 5 -``` - -[Artículo Original](https://clickhouse.tech/docs/en/query_language/agg_functions/parametric_functions/) - -## sumMapFiltered(keys_to_keep)(claves, valores) {#summapfilteredkeys-to-keepkeys-values} - -El mismo comportamiento que [sumMap](reference.md#agg_functions-summap) excepto que una matriz de claves se pasa como un parámetro. Esto puede ser especialmente útil cuando se trabaja con una alta cardinalidad de claves. diff --git a/docs/es/sql-reference/aggregate-functions/reference.md b/docs/es/sql-reference/aggregate-functions/reference.md deleted file mode 100644 index 572c4d01051..00000000000 --- a/docs/es/sql-reference/aggregate-functions/reference.md +++ /dev/null @@ -1,1914 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 36 -toc_title: Referencia ---- - -# Referencia de función agregada {#aggregate-functions-reference} - -## contar {#agg_function-count} - -Cuenta el número de filas o valores no NULL. - -ClickHouse admite las siguientes sintaxis para `count`: -- `count(expr)` o `COUNT(DISTINCT expr)`. -- `count()` o `COUNT(*)`. El `count()` la sintaxis es específica de ClickHouse. - -**Parámetros** - -La función puede tomar: - -- Cero parámetros. -- Una [expresion](../syntax.md#syntax-expressions). - -**Valor devuelto** - -- Si se llama a la función sin parámetros, cuenta el número de filas. -- Si el [expresion](../syntax.md#syntax-expressions) se pasa, entonces la función cuenta cuántas veces esta expresión devuelve no nula. Si la expresión devuelve un [NULL](../../sql-reference/data-types/nullable.md)-type valor, entonces el resultado de `count` no se queda `Nullable`. La función devuelve 0 si la expresión devuelta `NULL` para todas las filas. - -En ambos casos el tipo del valor devuelto es [UInt64](../../sql-reference/data-types/int-uint.md). - -**Detalles** - -ClickHouse soporta el `COUNT(DISTINCT ...)` sintaxis. El comportamiento de esta construcción depende del [count_distinct_implementation](../../operations/settings/settings.md#settings-count_distinct_implementation) configuración. Define cuál de las [uniq\*](#agg_function-uniq) se utiliza para realizar la operación. El valor predeterminado es el [uniqExact](#agg_function-uniqexact) función. - -El `SELECT count() FROM table` consulta no está optimizado, porque el número de entradas en la tabla no se almacena por separado. Elige una pequeña columna de la tabla y cuenta el número de valores en ella. - -**Ejemplos** - -Ejemplo 1: - -``` sql -SELECT count() FROM t -``` - -``` text -┌─count()─┐ -│ 5 │ -└─────────┘ -``` - -Ejemplo 2: - -``` sql -SELECT name, value FROM system.settings WHERE name = 'count_distinct_implementation' -``` - -``` text -┌─name──────────────────────────┬─value─────┐ -│ count_distinct_implementation │ uniqExact │ -└───────────────────────────────┴───────────┘ -``` - -``` sql -SELECT count(DISTINCT num) FROM t -``` - -``` text -┌─uniqExact(num)─┐ -│ 3 │ -└────────────────┘ -``` - -Este ejemplo muestra que `count(DISTINCT num)` se realiza por el `uniqExact` función según el `count_distinct_implementation` valor de ajuste. - -## cualquiera (x) {#agg_function-any} - -Selecciona el primer valor encontrado. -La consulta se puede ejecutar en cualquier orden e incluso en un orden diferente cada vez, por lo que el resultado de esta función es indeterminado. -Para obtener un resultado determinado, puede usar el ‘min’ o ‘max’ función en lugar de ‘any’. 
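-
-Un boceto orientativo que contrasta ambos enfoques:
-
-``` sql
--- any() puede devolver cualquier valor encontrado; min() es determinista
-SELECT any(number), min(number) FROM numbers(10);
-```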
- -En algunos casos, puede confiar en el orden de ejecución. Esto se aplica a los casos en que SELECT proviene de una subconsulta que usa ORDER BY. - -Cuando un `SELECT` consulta tiene el `GROUP BY` cláusula o al menos una función agregada, ClickHouse (en contraste con MySQL) requiere que todas las expresiones `SELECT`, `HAVING`, y `ORDER BY` las cláusulas pueden calcularse a partir de claves o de funciones agregadas. En otras palabras, cada columna seleccionada de la tabla debe usarse en claves o dentro de funciones agregadas. Para obtener un comportamiento como en MySQL, puede colocar las otras columnas en el `any` función de agregado. - -## Cualquier pesado (x) {#anyheavyx} - -Selecciona un valor que ocurre con frecuencia [pesos pesados](http://www.cs.umd.edu/~samir/498/karp.pdf) algoritmo. Si hay un valor que se produce más de la mitad de los casos en cada uno de los subprocesos de ejecución de la consulta, se devuelve este valor. Normalmente, el resultado es no determinista. - -``` sql -anyHeavy(column) -``` - -**Argumento** - -- `column` – The column name. - -**Ejemplo** - -Tome el [A tiempo](../../getting-started/example-datasets/ontime.md) conjunto de datos y seleccione cualquier valor que ocurra con frecuencia `AirlineID` columna. - -``` sql -SELECT anyHeavy(AirlineID) AS res -FROM ontime -``` - -``` text -┌───res─┐ -│ 19690 │ -└───────┘ -``` - -## Cualquier último (x) {#anylastx} - -Selecciona el último valor encontrado. -El resultado es tan indeterminado como para el `any` función. - -## Método de codificación de datos: {#groupbitand} - -Se aplica bit a bit `AND` para la serie de números. - -``` sql -groupBitAnd(expr) -``` - -**Parámetros** - -`expr` – An expression that results in `UInt*` tipo. - -**Valor de retorno** - -Valor de la `UInt*` tipo. - -**Ejemplo** - -Datos de prueba: - -``` text -binary decimal -00101100 = 44 -00011100 = 28 -00001101 = 13 -01010101 = 85 -``` - -Consulta: - -``` sql -SELECT groupBitAnd(num) FROM t -``` - -Donde `num` es la columna con los datos de prueba. - -Resultado: - -``` text -binary decimal -00000100 = 4 -``` - -## GrupoBitO {#groupbitor} - -Se aplica bit a bit `OR` para la serie de números. - -``` sql -groupBitOr(expr) -``` - -**Parámetros** - -`expr` – An expression that results in `UInt*` tipo. - -**Valor de retorno** - -Valor de la `UInt*` tipo. - -**Ejemplo** - -Datos de prueba: - -``` text -binary decimal -00101100 = 44 -00011100 = 28 -00001101 = 13 -01010101 = 85 -``` - -Consulta: - -``` sql -SELECT groupBitOr(num) FROM t -``` - -Donde `num` es la columna con los datos de prueba. - -Resultado: - -``` text -binary decimal -01111101 = 125 -``` - -## GrupoBitXor {#groupbitxor} - -Se aplica bit a bit `XOR` para la serie de números. - -``` sql -groupBitXor(expr) -``` - -**Parámetros** - -`expr` – An expression that results in `UInt*` tipo. - -**Valor de retorno** - -Valor de la `UInt*` tipo. - -**Ejemplo** - -Datos de prueba: - -``` text -binary decimal -00101100 = 44 -00011100 = 28 -00001101 = 13 -01010101 = 85 -``` - -Consulta: - -``` sql -SELECT groupBitXor(num) FROM t -``` - -Donde `num` es la columna con los datos de prueba. - -Resultado: - -``` text -binary decimal -01101000 = 104 -``` - -## Método de codificación de datos: {#groupbitmap} - -Mapa de bits o cálculos agregados de una columna entera sin signo, devuelve cardinalidad de tipo UInt64, si agrega el sufijo -State, luego devuelve [objeto de mapa de bits](../../sql-reference/functions/bitmap-functions.md). 
-
-``` sql
-groupBitmap(expr)
-```
-
-**Parámetros**
-
-`expr` – Una expresión que da como resultado un tipo `UInt*`.
-
-**Valor de retorno**
-
-Valor del tipo `UInt64`.
-
-**Ejemplo**
-
-Datos de prueba:
-
-``` text
-UserID
-1
-1
-2
-3
-```
-
-Consulta:
-
-``` sql
-SELECT groupBitmap(UserID) as num FROM t
-```
-
-Resultado:
-
-``` text
-num
-3
-```
-
-## min(x) {#agg_function-min}
-
-Calcula el mínimo.
-
-## max(x) {#agg_function-max}
-
-Calcula el máximo.
-
-## argMin(arg, val) {#agg-function-argmin}
-
-Calcula el valor de ‘arg’ para el valor mínimo de ‘val’. Si hay varios valores diferentes de ‘arg’ para valores mínimos de ‘val’, se devuelve el primero de estos valores encontrado.
-
-**Ejemplo:**
-
-``` text
-┌─user─────┬─salary─┐
-│ director │   5000 │
-│ manager  │   3000 │
-│ worker   │   1000 │
-└──────────┴────────┘
-```
-
-``` sql
-SELECT argMin(user, salary) FROM salary
-```
-
-``` text
-┌─argMin(user, salary)─┐
-│ worker               │
-└──────────────────────┘
-```
-
-## argMax(arg, val) {#agg-function-argmax}
-
-Calcula el valor de ‘arg’ para el valor máximo de ‘val’. Si hay varios valores diferentes de ‘arg’ para valores máximos de ‘val’, se devuelve el primero de estos valores encontrado.
-
-## sum(x) {#agg_function-sum}
-
-Calcula la suma.
-Solo funciona para números.
-
-## sumWithOverflow(x) {#sumwithoverflowx}
-
-Calcula la suma de los números, utilizando para el resultado el mismo tipo de datos que el de los parámetros de entrada. Si la suma supera el valor máximo de ese tipo de datos, la función devuelve un error.
-
-Solo funciona para números.
-
-## sumMap(key, value), sumMap(Tuple(key, value)) {#agg_functions-summap}
-
-Suma la matriz ‘value’ de acuerdo con las claves especificadas en la matriz ‘key’.
-Pasar una tupla de matrices de claves y valores es sinónimo de pasar dos matrices: una de claves y otra de valores.
-El número de elementos en ‘key’ y ‘value’ debe ser el mismo en cada fila que se sume.
-Devuelve una tupla de dos matrices: las claves en orden ordenado y los valores sumados para las claves correspondientes.
-
-Ejemplo:
-
-``` sql
-CREATE TABLE sum_map(
-    date Date,
-    timeslot DateTime,
-    statusMap Nested(
-        status UInt16,
-        requests UInt64
-    ),
-    statusMapTuple Tuple(Array(Int32), Array(Int32))
-) ENGINE = Log;
-INSERT INTO sum_map VALUES
-    ('2000-01-01', '2000-01-01 00:00:00', [1, 2, 3], [10, 10, 10], ([1, 2, 3], [10, 10, 10])),
-    ('2000-01-01', '2000-01-01 00:00:00', [3, 4, 5], [10, 10, 10], ([3, 4, 5], [10, 10, 10])),
-    ('2000-01-01', '2000-01-01 00:01:00', [4, 5, 6], [10, 10, 10], ([4, 5, 6], [10, 10, 10])),
-    ('2000-01-01', '2000-01-01 00:01:00', [6, 7, 8], [10, 10, 10], ([6, 7, 8], [10, 10, 10]));
-
-SELECT
-    timeslot,
-    sumMap(statusMap.status, statusMap.requests),
-    sumMap(statusMapTuple)
-FROM sum_map
-GROUP BY timeslot
-```
-
-``` text
-┌────────────timeslot─┬─sumMap(statusMap.status, statusMap.requests)─┬─sumMap(statusMapTuple)─────────┐
-│ 2000-01-01 00:00:00 │ ([1,2,3,4,5],[10,10,20,10,10])               │ ([1,2,3,4,5],[10,10,20,10,10]) │
-│ 2000-01-01 00:01:00 │ ([4,5,6,7,8],[10,10,20,10,10])               │ ([4,5,6,7,8],[10,10,20,10,10]) │
-└─────────────────────┴──────────────────────────────────────────────┴────────────────────────────────┘
-```
-
-## skewPop {#skewpop}
-
-Calcula la [asimetría](https://en.wikipedia.org/wiki/Skewness) de una secuencia.
-
-``` sql
-skewPop(expr)
-```
-
-**Parámetros**
-
-`expr` — [Expresión](../syntax.md#syntax-expressions) que devuelve un número.
-
-**Valor devuelto**
-
-The skewness of the given distribution.
Type — [Float64](../../sql-reference/data-types/float.md) - -**Ejemplo** - -``` sql -SELECT skewPop(value) FROM series_with_value_column -``` - -## Sistema abierto {#skewsamp} - -Calcula el [asimetría de la muestra](https://en.wikipedia.org/wiki/Skewness) de una secuencia. - -Representa una estimación imparcial de la asimetría de una variable aleatoria si los valores pasados forman su muestra. - -``` sql -skewSamp(expr) -``` - -**Parámetros** - -`expr` — [Expresion](../syntax.md#syntax-expressions) devolviendo un número. - -**Valor devuelto** - -The skewness of the given distribution. Type — [Float64](../../sql-reference/data-types/float.md). Si `n <= 1` (`n` es el tamaño de la muestra), luego la función devuelve `nan`. - -**Ejemplo** - -``` sql -SELECT skewSamp(value) FROM series_with_value_column -``` - -## KurtPop {#kurtpop} - -Calcula el [curtosis](https://en.wikipedia.org/wiki/Kurtosis) de una secuencia. - -``` sql -kurtPop(expr) -``` - -**Parámetros** - -`expr` — [Expresion](../syntax.md#syntax-expressions) devolviendo un número. - -**Valor devuelto** - -The kurtosis of the given distribution. Type — [Float64](../../sql-reference/data-types/float.md) - -**Ejemplo** - -``` sql -SELECT kurtPop(value) FROM series_with_value_column -``` - -## KurtSamp {#kurtsamp} - -Calcula el [curtosis muestra](https://en.wikipedia.org/wiki/Kurtosis) de una secuencia. - -Representa una estimación imparcial de la curtosis de una variable aleatoria si los valores pasados forman su muestra. - -``` sql -kurtSamp(expr) -``` - -**Parámetros** - -`expr` — [Expresion](../syntax.md#syntax-expressions) devolviendo un número. - -**Valor devuelto** - -The kurtosis of the given distribution. Type — [Float64](../../sql-reference/data-types/float.md). Si `n <= 1` (`n` es un tamaño de la muestra), luego la función devuelve `nan`. - -**Ejemplo** - -``` sql -SELECT kurtSamp(value) FROM series_with_value_column -``` - -## Acerca de) {#agg_function-avg} - -Calcula el promedio. -Solo funciona para números. -El resultado es siempre Float64. - -## avgPonderado {#avgweighted} - -Calcula el [media aritmética ponderada](https://en.wikipedia.org/wiki/Weighted_arithmetic_mean). - -**Sintaxis** - -``` sql -avgWeighted(x, weight) -``` - -**Parámetros** - -- `x` — Values. [Entero](../data-types/int-uint.md) o [punto flotante](../data-types/float.md). -- `weight` — Weights of the values. [Entero](../data-types/int-uint.md) o [punto flotante](../data-types/float.md). - -Tipo de `x` y `weight` debe ser el mismo. - -**Valor devuelto** - -- Media ponderada. -- `NaN`. Si todos los pesos son iguales a 0. - -Tipo: [Float64](../data-types/float.md). - -**Ejemplo** - -Consulta: - -``` sql -SELECT avgWeighted(x, w) -FROM values('x Int8, w Int8', (4, 1), (1, 0), (10, 2)) -``` - -Resultado: - -``` text -┌─avgWeighted(x, weight)─┐ -│ 8 │ -└────────────────────────┘ -``` - -## uniq {#agg_function-uniq} - -Calcula el número aproximado de diferentes valores del argumento. - -``` sql -uniq(x[, ...]) -``` - -**Parámetros** - -La función toma un número variable de parámetros. Los parámetros pueden ser `Tuple`, `Array`, `Date`, `DateTime`, `String`, o tipos numéricos. - -**Valor devuelto** - -- A [UInt64](../../sql-reference/data-types/int-uint.md)-tipo número. - -**Detalles de implementación** - -Función: - -- Calcula un hash para todos los parámetros en el agregado, luego lo usa en los cálculos. - -- Utiliza un algoritmo de muestreo adaptativo. Para el estado de cálculo, la función utiliza una muestra de valores hash de elemento de hasta 65536. 
- - This algorithm is very accurate and very efficient on the CPU. When the query contains several of these functions, using `uniq` is almost as fast as using other aggregate functions. - -- Proporciona el resultado de forma determinista (no depende del orden de procesamiento de la consulta). - -Recomendamos usar esta función en casi todos los escenarios. - -**Ver también** - -- [uniqCombined](#agg_function-uniqcombined) -- [UniqCombined64](#agg_function-uniqcombined64) -- [uniqHLL12](#agg_function-uniqhll12) -- [uniqExact](#agg_function-uniqexact) - -## uniqCombined {#agg_function-uniqcombined} - -Calcula el número aproximado de diferentes valores de argumento. - -``` sql -uniqCombined(HLL_precision)(x[, ...]) -``` - -El `uniqCombined` es una buena opción para calcular el número de valores diferentes. - -**Parámetros** - -La función toma un número variable de parámetros. Los parámetros pueden ser `Tuple`, `Array`, `Date`, `DateTime`, `String`, o tipos numéricos. - -`HLL_precision` es el logaritmo base-2 del número de células en [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog). Opcional, puede utilizar la función como `uniqCombined(x[, ...])`. El valor predeterminado para `HLL_precision` es 17, que es efectivamente 96 KiB de espacio (2 ^ 17 celdas, 6 bits cada una). - -**Valor devuelto** - -- Numero [UInt64](../../sql-reference/data-types/int-uint.md)-tipo número. - -**Detalles de implementación** - -Función: - -- Calcula un hash (hash de 64 bits para `String` y 32 bits de lo contrario) para todos los parámetros en el agregado, luego lo usa en los cálculos. - -- Utiliza una combinación de tres algoritmos: matriz, tabla hash e HyperLogLog con una tabla de corrección de errores. - - For a small number of distinct elements, an array is used. When the set size is larger, a hash table is used. For a larger number of elements, HyperLogLog is used, which will occupy a fixed amount of memory. - -- Proporciona el resultado de forma determinista (no depende del orden de procesamiento de la consulta). - -!!! note "Nota" - Dado que usa hash de 32 bits para no-`String` tipo, el resultado tendrá un error muy alto para cardinalidades significativamente mayores que `UINT_MAX` (el error aumentará rápidamente después de unas pocas decenas de miles de millones de valores distintos), por lo tanto, en este caso debe usar [UniqCombined64](#agg_function-uniqcombined64) - -En comparación con el [uniq](#agg_function-uniq) función, el `uniqCombined`: - -- Consume varias veces menos memoria. -- Calcula con una precisión varias veces mayor. -- Por lo general, tiene un rendimiento ligeramente menor. En algunos escenarios, `uniqCombined` puede funcionar mejor que `uniq`, por ejemplo, con consultas distribuidas que transmiten un gran número de estados de agregación a través de la red. - -**Ver también** - -- [uniq](#agg_function-uniq) -- [UniqCombined64](#agg_function-uniqcombined64) -- [uniqHLL12](#agg_function-uniqhll12) -- [uniqExact](#agg_function-uniqexact) - -## UniqCombined64 {#agg_function-uniqcombined64} - -Lo mismo que [uniqCombined](#agg_function-uniqcombined), pero utiliza hash de 64 bits para todos los tipos de datos. - -## uniqHLL12 {#agg_function-uniqhll12} - -Calcula el número aproximado de diferentes valores de argumento [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) algoritmo. - -``` sql -uniqHLL12(x[, ...]) -``` - -**Parámetros** - -La función toma un número variable de parámetros. Los parámetros pueden ser `Tuple`, `Array`, `Date`, `DateTime`, `String`, o tipos numéricos. 
-
-**Valor devuelto**
-
-- Un número de tipo [UInt64](../../sql-reference/data-types/int-uint.md).
-
-**Detalles de implementación**
-
-Función:
-
-- Calcula un hash para todos los parámetros del agregado y luego lo usa en los cálculos.
-
-- Utiliza el algoritmo HyperLogLog para aproximar el número de valores de argumento diferentes.
-
-    Se utilizan 2^12 celdas de 5 bits. El tamaño del estado es algo más de 2.5 KB. El resultado no es muy preciso (error de hasta ~10%) para conjuntos de datos pequeños (<10K elementos). Sin embargo, el resultado es bastante preciso para conjuntos de datos de alta cardinalidad (10K-100M), con un error máximo de ~1.6%. A partir de 100M, el error de estimación aumenta y la función devolverá resultados muy imprecisos para conjuntos de datos con cardinalidad extremadamente alta (1B+ elementos).
-
-- Proporciona el resultado de forma determinista (no depende del orden de procesamiento de la consulta).
-
-No recomendamos usar esta función. En la mayoría de los casos, use la función [uniq](#agg_function-uniq) o [uniqCombined](#agg_function-uniqcombined).
-
-**Ver también**
-
-- [uniq](#agg_function-uniq)
-- [uniqCombined](#agg_function-uniqcombined)
-- [uniqExact](#agg_function-uniqexact)
-
-## uniqExact {#agg_function-uniqexact}
-
-Calcula el número exacto de valores de argumento diferentes.
-
-``` sql
-uniqExact(x[, ...])
-```
-
-Utilice la función `uniqExact` si necesita absolutamente un resultado exacto. De lo contrario, use la función [uniq](#agg_function-uniq).
-
-La función `uniqExact` utiliza más memoria que `uniq`, porque el tamaño del estado crece sin límite a medida que aumenta el número de valores diferentes.
-
-**Parámetros**
-
-La función toma un número variable de parámetros. Los parámetros pueden ser `Tuple`, `Array`, `Date`, `DateTime`, `String` o tipos numéricos.
-
-**Ver también**
-
-- [uniq](#agg_function-uniq)
-- [uniqCombined](#agg_function-uniqcombined)
-- [uniqHLL12](#agg_function-uniqhll12)
-
-## groupArray(x), groupArray(max_size)(x) {#agg_function-grouparray}
-
-Crea una matriz con los valores del argumento.
-Los valores se pueden añadir a la matriz en cualquier orden (indeterminado).
-
-La segunda versión (con el parámetro `max_size`) limita el tamaño de la matriz resultante a `max_size` elementos.
-Por ejemplo, `groupArray(1)(x)` es equivalente a `[any(x)]`.
-
-En algunos casos, aún puede confiar en el orden de ejecución. Esto se aplica a los casos en que `SELECT` procede de una subconsulta que utiliza `ORDER BY`.
-
-## groupArrayInsertAt {#grouparrayinsertat}
-
-Inserta un valor en la matriz en la posición especificada.
-
-**Sintaxis**
-
-``` sql
-groupArrayInsertAt(default_x, size)(x, pos);
-```
-
-Si en una consulta se insertan varios valores en la misma posición, la función se comporta de las siguientes maneras:
-
-- Si la consulta se ejecuta en un solo subproceso, se utiliza el primero de los valores insertados.
-- Si la consulta se ejecuta en varios subprocesos, el valor resultante es uno indeterminado de los valores insertados.
-
-**Parámetros**
-
-- `x` — Valor que se va a insertar. [Expresión](../syntax.md#syntax-expressions) que da como resultado uno de los [tipos de datos compatibles](../../sql-reference/data-types/index.md).
-- `pos` — Posición en la que se insertará el elemento `x` especificado. La numeración de índices en la matriz comienza desde cero. [UInt32](../../sql-reference/data-types/int-uint.md#uint-ranges).
-- `default_x` — Valor predeterminado para sustituir en las posiciones vacías. Parámetro opcional.
[Expresion](../syntax.md#syntax-expressions) dando como resultado el tipo de datos configurado para `x` parámetro. Si `default_x` no está definido, el [valores predeterminados](../../sql-reference/statements/create.md#create-default-values) se utilizan. -- `size`— Length of the resulting array. Optional parameter. When using this parameter, the default value `default_x` debe ser especificado. [UInt32](../../sql-reference/data-types/int-uint.md#uint-ranges). - -**Valor devuelto** - -- Matriz con valores insertados. - -Tipo: [Matriz](../../sql-reference/data-types/array.md#data-type-array). - -**Ejemplo** - -Consulta: - -``` sql -SELECT groupArrayInsertAt(toString(number), number * 2) FROM numbers(5); -``` - -Resultado: - -``` text -┌─groupArrayInsertAt(toString(number), multiply(number, 2))─┐ -│ ['0','','1','','2','','3','','4'] │ -└───────────────────────────────────────────────────────────┘ -``` - -Consulta: - -``` sql -SELECT groupArrayInsertAt('-')(toString(number), number * 2) FROM numbers(5); -``` - -Resultado: - -``` text -┌─groupArrayInsertAt('-')(toString(number), multiply(number, 2))─┐ -│ ['0','-','1','-','2','-','3','-','4'] │ -└────────────────────────────────────────────────────────────────┘ -``` - -Consulta: - -``` sql -SELECT groupArrayInsertAt('-', 5)(toString(number), number * 2) FROM numbers(5); -``` - -Resultado: - -``` text -┌─groupArrayInsertAt('-', 5)(toString(number), multiply(number, 2))─┐ -│ ['0','-','1','-','2'] │ -└───────────────────────────────────────────────────────────────────┘ -``` - -Inserción multihilo de elementos en una posición. - -Consulta: - -``` sql -SELECT groupArrayInsertAt(number, 0) FROM numbers_mt(10) SETTINGS max_block_size = 1; -``` - -Como resultado de esta consulta, obtiene un entero aleatorio en el `[0,9]` gama. Por ejemplo: - -``` text -┌─groupArrayInsertAt(number, 0)─┐ -│ [7] │ -└───────────────────────────────┘ -``` - -## groupArrayMovingSum {#agg_function-grouparraymovingsum} - -Calcula la suma móvil de los valores de entrada. - -``` sql -groupArrayMovingSum(numbers_for_summing) -groupArrayMovingSum(window_size)(numbers_for_summing) -``` - -La función puede tomar el tamaño de la ventana como un parámetro. Si no se especifica, la función toma el tamaño de ventana igual al número de filas de la columna. - -**Parámetros** - -- `numbers_for_summing` — [Expresion](../syntax.md#syntax-expressions) dando como resultado un valor de tipo de datos numérico. -- `window_size` — Size of the calculation window. - -**Valores devueltos** - -- Matriz del mismo tamaño y tipo que los datos de entrada. 
- -**Ejemplo** - -La tabla de ejemplo: - -``` sql -CREATE TABLE t -( - `int` UInt8, - `float` Float32, - `dec` Decimal32(2) -) -ENGINE = TinyLog -``` - -``` text -┌─int─┬─float─┬──dec─┐ -│ 1 │ 1.1 │ 1.10 │ -│ 2 │ 2.2 │ 2.20 │ -│ 4 │ 4.4 │ 4.40 │ -│ 7 │ 7.77 │ 7.77 │ -└─────┴───────┴──────┘ -``` - -Consulta: - -``` sql -SELECT - groupArrayMovingSum(int) AS I, - groupArrayMovingSum(float) AS F, - groupArrayMovingSum(dec) AS D -FROM t -``` - -``` text -┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐ -│ [1,3,7,14] │ [1.1,3.3000002,7.7000003,15.47] │ [1.10,3.30,7.70,15.47] │ -└────────────┴─────────────────────────────────┴────────────────────────┘ -``` - -``` sql -SELECT - groupArrayMovingSum(2)(int) AS I, - groupArrayMovingSum(2)(float) AS F, - groupArrayMovingSum(2)(dec) AS D -FROM t -``` - -``` text -┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐ -│ [1,3,6,11] │ [1.1,3.3000002,6.6000004,12.17] │ [1.10,3.30,6.60,12.17] │ -└────────────┴─────────────────────────────────┴────────────────────────┘ -``` - -## Método de codificación de datos: {#agg_function-grouparraymovingavg} - -Calcula la media móvil de los valores de entrada. - -``` sql -groupArrayMovingAvg(numbers_for_summing) -groupArrayMovingAvg(window_size)(numbers_for_summing) -``` - -La función puede tomar el tamaño de la ventana como un parámetro. Si no se especifica, la función toma el tamaño de ventana igual al número de filas de la columna. - -**Parámetros** - -- `numbers_for_summing` — [Expresion](../syntax.md#syntax-expressions) dando como resultado un valor de tipo de datos numérico. -- `window_size` — Size of the calculation window. - -**Valores devueltos** - -- Matriz del mismo tamaño y tipo que los datos de entrada. - -La función utiliza [redondeando hacia cero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero). Trunca los decimales insignificantes para el tipo de datos resultante. - -**Ejemplo** - -La tabla de ejemplo `b`: - -``` sql -CREATE TABLE t -( - `int` UInt8, - `float` Float32, - `dec` Decimal32(2) -) -ENGINE = TinyLog -``` - -``` text -┌─int─┬─float─┬──dec─┐ -│ 1 │ 1.1 │ 1.10 │ -│ 2 │ 2.2 │ 2.20 │ -│ 4 │ 4.4 │ 4.40 │ -│ 7 │ 7.77 │ 7.77 │ -└─────┴───────┴──────┘ -``` - -Consulta: - -``` sql -SELECT - groupArrayMovingAvg(int) AS I, - groupArrayMovingAvg(float) AS F, - groupArrayMovingAvg(dec) AS D -FROM t -``` - -``` text -┌─I─────────┬─F───────────────────────────────────┬─D─────────────────────┐ -│ [0,0,1,3] │ [0.275,0.82500005,1.9250001,3.8675] │ [0.27,0.82,1.92,3.86] │ -└───────────┴─────────────────────────────────────┴───────────────────────┘ -``` - -``` sql -SELECT - groupArrayMovingAvg(2)(int) AS I, - groupArrayMovingAvg(2)(float) AS F, - groupArrayMovingAvg(2)(dec) AS D -FROM t -``` - -``` text -┌─I─────────┬─F────────────────────────────────┬─D─────────────────────┐ -│ [0,1,3,5] │ [0.55,1.6500001,3.3000002,6.085] │ [0.55,1.65,3.30,6.08] │ -└───────────┴──────────────────────────────────┴───────────────────────┘ -``` - -## ¿Cómo puedo obtener más información?) {#groupuniqarrayx-groupuniqarraymax-sizex} - -Crea una matriz a partir de diferentes valores de argumento. El consumo de memoria es el mismo que para el `uniqExact` función. - -La segunda versión (con el `max_size` parámetro) limita el tamaño de la matriz resultante a `max_size` elemento. -Por ejemplo, `groupUniqArray(1)(x)` es equivalente a `[any(x)]`. - -## cuantil {#quantile} - -Calcula un aproximado [cuantil](https://en.wikipedia.org/wiki/Quantile) de una secuencia de datos numéricos. 
-
-## quantile {#quantile}
-
-Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
-
-This function applies [reservoir sampling](https://en.wikipedia.org/wiki/Reservoir_sampling) with a reservoir size up to 8192 and a random number generator for sampling. The result is non-deterministic. To get an exact quantile, use the [quantileExact](#quantileexact) function.
-
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
-
-**Syntax**
-
-``` sql
-quantile(level)(expr)
-```
-
-Alias: `median`.
-
-**Parameters**
-
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
-- `expr` — Expression over the column values resulting in numeric [data types](../../sql-reference/data-types/index.md#data_types), [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
-
-**Returned value**
-
-- Approximate quantile of the specified level.
-
-Type:
-
-- [Float64](../../sql-reference/data-types/float.md) for numeric data type input.
-- [Date](../../sql-reference/data-types/date.md) if input values have the `Date` type.
-- [DateTime](../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.
-
-**Example**
-
-Input table:
-
-``` text
-┌─val─┐
-│   1 │
-│   1 │
-│   2 │
-│   3 │
-└─────┘
-```
-
-Query:
-
-``` sql
-SELECT quantile(val) FROM t
-```
-
-Result:
-
-``` text
-┌─quantile(val)─┐
-│           1.5 │
-└───────────────┘
-```
-
-**See Also**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileDeterministic {#quantiledeterministic}
-
-Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
-
-This function applies [reservoir sampling](https://en.wikipedia.org/wiki/Reservoir_sampling) with a reservoir size up to 8192 and a deterministic algorithm of sampling. The result is deterministic. To get an exact quantile, use the [quantileExact](#quantileexact) function.
-
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
-
-**Syntax**
-
-``` sql
-quantileDeterministic(level)(expr, determinator)
-```
-
-Alias: `medianDeterministic`.
-
-**Parameters**
-
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
-- `expr` — Expression over the column values resulting in numeric [data types](../../sql-reference/data-types/index.md#data_types), [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
-- `determinator` — Number whose hash is used instead of a random number generator in the reservoir sampling algorithm to make the result of sampling deterministic. As a determinator you can use any deterministic positive number, for example, a user id or an event id. If the same determinator value occurs too often, the function works incorrectly.
-
-**Returned value**
-
-- Approximate quantile of the specified level.
-
-Type:
-
-- [Float64](../../sql-reference/data-types/float.md) for numeric data type input.
-- [Date](../../sql-reference/data-types/date.md) if input values have the `Date` type.
-- [DateTime](../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.
-
-**Example**
-
-Input table:
-
-``` text
-┌─val─┐
-│   1 │
-│   1 │
-│   2 │
-│   3 │
-└─────┘
-```
-
-Query:
-
-``` sql
-SELECT quantileDeterministic(val, 1) FROM t
-```
-
-Result:
-
-``` text
-┌─quantileDeterministic(val, 1)─┐
-│                           1.5 │
-└───────────────────────────────┘
-```
-
-**See Also**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileExact {#quantileexact}
-
-Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
-
-To get the exact value, all the passed values are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` memory, where `n` is the number of passed values. However, for a small number of values, the function is very effective.
-
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
-
-**Syntax**
-
-``` sql
-quantileExact(level)(expr)
-```
-
-Alias: `medianExact`.
-
-**Parameters**
-
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
-- `expr` — Expression over the column values resulting in numeric [data types](../../sql-reference/data-types/index.md#data_types), [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
-
-**Returned value**
-
-- Quantile of the specified level.
-
-Type:
-
-- [Float64](../../sql-reference/data-types/float.md) for numeric data type input.
-- [Date](../../sql-reference/data-types/date.md) if input values have the `Date` type.
-- [DateTime](../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT quantileExact(number) FROM numbers(10)
-```
-
-Result:
-
-``` text
-┌─quantileExact(number)─┐
-│                     5 │
-└───────────────────────┘
-```
-
-**See Also**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileExactWeighted {#quantileexactweighted}
-
-Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence, taking into account the weight of each element.
-
-To get the exact value, all the passed values are combined into an array, which is then partially sorted. Each value is counted with its weight, as if it is present `weight` times. A hash table is used in the algorithm. Because of this, if the passed values are frequently repeated, the function consumes less RAM than [quantileExact](#quantileexact). You can use this function instead of `quantileExact` and specify the weight 1.
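-
-For instance (an added sketch, not from the original page):
-
-``` sql
-SELECT quantileExact(number), quantileExactWeighted(number, 1) FROM numbers(10)
-```
-
-Per the equivalence described above, both columns should contain the same value.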
-
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
-
-**Syntax**
-
-``` sql
-quantileExactWeighted(level)(expr, weight)
-```
-
-Alias: `medianExactWeighted`.
-
-**Parameters**
-
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
-- `expr` — Expression over the column values resulting in numeric [data types](../../sql-reference/data-types/index.md#data_types), [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
-- `weight` — Column with weights of sequence members. Weight is a number of value occurrences.
-
-**Returned value**
-
-- Quantile of the specified level.
-
-Type:
-
-- [Float64](../../sql-reference/data-types/float.md) for numeric data type input.
-- [Date](../../sql-reference/data-types/date.md) if input values have the `Date` type.
-- [DateTime](../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.
-
-**Example**
-
-Input table:
-
-``` text
-┌─n─┬─val─┐
-│ 0 │   3 │
-│ 1 │   2 │
-│ 2 │   1 │
-│ 5 │   4 │
-└───┴─────┘
-```
-
-Query:
-
-``` sql
-SELECT quantileExactWeighted(n, val) FROM t
-```
-
-Result:
-
-``` text
-┌─quantileExactWeighted(n, val)─┐
-│                             1 │
-└───────────────────────────────┘
-```
-
-**See Also**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileTiming {#quantiletiming}
-
-With the determined precision, computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
-
-The result is deterministic (it does not depend on the query processing order). The function is optimized for working with sequences that describe distributions like web page loading times or backend response times.
-
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
-
-**Syntax**
-
-``` sql
-quantileTiming(level)(expr)
-```
-
-Alias: `medianTiming`.
-
-**Parameters**
-
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
-
-- `expr` — [Expression](../syntax.md#syntax-expressions) over a column values returning a [Float\*](../../sql-reference/data-types/float.md)-type number.
-
-    - If negative values are passed to the function, the behavior is undefined.
-    - If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000.
-
-**Accuracy**
-
-The calculation is accurate if:
-
-- The total number of values does not exceed 5670.
-- The total number of values exceeds 5670, but the page loading time is less than 1024 ms.
-
-Otherwise, the result of the calculation is rounded to the nearest multiple of 16 ms.
-
-!!! note "Note"
-    For calculating page loading time quantiles, this function is more effective and accurate than [quantile](#quantile).
-
-**Returned value**
-
-- Quantile of the specified level.
-
-Type: `Float32`.
-
-!!! note "Note"
-    If no values are passed to the function (when using `quantileTimingIf`), [NaN](../../sql-reference/data-types/float.md#data_type-float-nan-inf) is returned. The purpose of this is to differentiate these cases from cases that result in zero. See the [ORDER BY clause](../statements/select/order-by.md#select-order-by) for notes on sorting `NaN` values.
-
-**Example**
-
-Input table:
-
-``` text
-┌─response_time─┐
-│            72 │
-│           112 │
-│           126 │
-│           145 │
-│           104 │
-│           242 │
-│           313 │
-│           168 │
-│           108 │
-└───────────────┘
-```
-
-Query:
-
-``` sql
-SELECT quantileTiming(response_time) FROM t
-```
-
-Result:
-
-``` text
-┌─quantileTiming(response_time)─┐
-│                           126 │
-└───────────────────────────────┘
-```
-
-**See Also**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileTimingWeighted {#quantiletimingweighted}
-
-With the determined precision, computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence according to the weight of each sequence member.
-
-The result is deterministic (it does not depend on the query processing order). The function is optimized for working with sequences that describe distributions like web page loading times or backend response times.
-
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
-
-**Syntax**
-
-``` sql
-quantileTimingWeighted(level)(expr, weight)
-```
-
-Alias: `medianTimingWeighted`.
-
-**Parameters**
-
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
-
-- `expr` — [Expression](../syntax.md#syntax-expressions) over a column values returning a [Float\*](../../sql-reference/data-types/float.md)-type number.
-
-    - If negative values are passed to the function, the behavior is undefined.
-    - If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000.
-
-- `weight` — Column with weights of sequence elements. Weight is a number of value occurrences.
-
-**Accuracy**
-
-The calculation is accurate if:
-
-- The total number of values does not exceed 5670.
-- The total number of values exceeds 5670, but the page loading time is less than 1024 ms.
-
-Otherwise, the result of the calculation is rounded to the nearest multiple of 16 ms.
-
-!!! note "Note"
-    For calculating page loading time quantiles, this function is more effective and accurate than [quantile](#quantile).
-
-**Returned value**
-
-- Quantile of the specified level.
-
-Type: `Float32`.
-
-!!! note "Note"
-    If no values are passed to the function (when using `quantileTimingIf`), [NaN](../../sql-reference/data-types/float.md#data_type-float-nan-inf) is returned. The purpose of this is to differentiate these cases from cases that result in zero. See the [ORDER BY clause](../statements/select/order-by.md#select-order-by) for notes on sorting `NaN` values.
-
-**Example**
-
-Input table:
-
-``` text
-┌─response_time─┬─weight─┐
-│            68 │      1 │
-│           104 │      2 │
-│           112 │      3 │
-│           126 │      2 │
-│           138 │      1 │
-│           162 │      1 │
-└───────────────┴────────┘
-```
-
-Query:
-
-``` sql
-SELECT quantileTimingWeighted(response_time, weight) FROM t
-```
-
-Result:
-
-``` text
-┌─quantileTimingWeighted(response_time, weight)─┐
-│                                           112 │
-└───────────────────────────────────────────────┘
-```
-
-**See Also**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileTDigest {#quantiletdigest}
-
-Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence using the [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) algorithm.
-
-The maximum error is 1%. Memory consumption is `log(n)`, where `n` is the number of values. The result depends on the order of running the query and is non-deterministic.
-
-The performance of the function is lower than the performance of [quantile](#quantile) or [quantileTiming](#quantiletiming). In terms of the ratio of state size to precision, this function is much better than `quantile`.
-
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
-
-**Syntax**
-
-``` sql
-quantileTDigest(level)(expr)
-```
-
-Alias: `medianTDigest`.
-
-**Parameters**
-
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
-- `expr` — Expression over the column values resulting in numeric [data types](../../sql-reference/data-types/index.md#data_types), [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
-
-**Returned value**
-
-- Approximate quantile of the specified level.
-
-Type:
-
-- [Float64](../../sql-reference/data-types/float.md) for numeric data type input.
-- [Date](../../sql-reference/data-types/date.md) if input values have the `Date` type.
-- [DateTime](../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT quantileTDigest(number) FROM numbers(10)
-```
-
-Result:
-
-``` text
-┌─quantileTDigest(number)─┐
-│                     4.5 │
-└─────────────────────────┘
-```
-
-**See Also**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileTDigestWeighted {#quantiletdigestweighted}
-
-Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence using the [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) algorithm. The function takes into account the weight of each sequence member. The maximum error is 1%. Memory consumption is `log(n)`, where `n` is the number of values.
-
-The performance of the function is lower than the performance of [quantile](#quantile) or [quantileTiming](#quantiletiming). In terms of the ratio of state size to precision, this function is much better than `quantile`.
-
-The result depends on the order of running the query and is non-deterministic.
-
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
-
-**Syntax**
-
-``` sql
-quantileTDigestWeighted(level)(expr, weight)
-```
-
-Alias: `medianTDigestWeighted`.
-
-**Parameters**
-
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
-- `expr` — Expression over the column values resulting in numeric [data types](../../sql-reference/data-types/index.md#data_types), [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
-- `weight` — Column with weights of sequence elements. Weight is a number of value occurrences.
-
-**Returned value**
-
-- Approximate quantile of the specified level.
-
-Type:
-
-- [Float64](../../sql-reference/data-types/float.md) for numeric data type input.
-- [Date](../../sql-reference/data-types/date.md) if input values have the `Date` type.
-- [DateTime](../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT quantileTDigestWeighted(number, 1) FROM numbers(10)
-```
-
-Result:
-
-``` text
-┌─quantileTDigestWeighted(number, 1)─┐
-│                                4.5 │
-└────────────────────────────────────┘
-```
-
-**See Also**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## median {#median}
-
-The `median*` functions are the aliases for the corresponding `quantile*` functions. They calculate the median of a numeric data sample.
-
-Functions:
-
-- `median` — Alias for [quantile](#quantile).
-- `medianDeterministic` — Alias for [quantileDeterministic](#quantiledeterministic).
-- `medianExact` — Alias for [quantileExact](#quantileexact).
-- `medianExactWeighted` — Alias for [quantileExactWeighted](#quantileexactweighted).
-- `medianTiming` — Alias for [quantileTiming](#quantiletiming).
-- `medianTimingWeighted` — Alias for [quantileTimingWeighted](#quantiletimingweighted).
-- `medianTDigest` — Alias for [quantileTDigest](#quantiletdigest).
-- `medianTDigestWeighted` — Alias for [quantileTDigestWeighted](#quantiletdigestweighted).
-
-**Example**
-
-Input table:
-
-``` text
-┌─val─┐
-│   1 │
-│   1 │
-│   2 │
-│   3 │
-└─────┘
-```
-
-Query:
-
-``` sql
-SELECT medianDeterministic(val, 1) FROM t
-```
-
-Result:
-
-``` text
-┌─medianDeterministic(val, 1)─┐
-│                         1.5 │
-└─────────────────────────────┘
-```
-
-## quantiles(level1, level2, …)(x) {#quantiles}
-
-All the quantile functions also have corresponding quantiles functions: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`. These functions calculate all the quantiles of the listed levels in one pass and return an array of the resulting values.
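-
-As an illustration (an added sketch, not part of the original reference), several levels computed in a single pass:
-
-``` sql
-SELECT quantiles(0.25, 0.5, 0.75)(number) FROM numbers(100)
-```
-
-The query returns one array holding the three requested quantiles.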
-
-## varSamp(x) {#varsampx}
-
-Calculates the amount `Σ((x - x̅)^2) / (n - 1)`, where `n` is the sample size and `x̅` is the average value of `x`.
-
-It represents an unbiased estimate of the variance of a random variable if the passed values form its sample.
-
-Returns `Float64`. When `n <= 1`, returns `+∞`.
-
-!!! note "Note"
-    This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the `varSampStable` function. It works slower but provides a lower computational error.
-
-## varPop(x) {#varpopx}
-
-Calculates the amount `Σ((x - x̅)^2) / n`, where `n` is the sample size and `x̅` is the average value of `x`.
-
-In other words, dispersion for a set of values. Returns `Float64`.
-
-!!! note "Note"
-    This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the `varPopStable` function. It works slower but provides a lower computational error.
-
-## stddevSamp(x) {#stddevsampx}
-
-The result is equal to the square root of `varSamp(x)`.
-
-!!! note "Note"
-    This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the `stddevSampStable` function. It works slower but provides a lower computational error.
-
-## stddevPop(x) {#stddevpopx}
-
-The result is equal to the square root of `varPop(x)`.
-
-!!! note "Note"
-    This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the `stddevPopStable` function. It works slower but provides a lower computational error.
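-
-A quick sketch (added here, not from the original page) comparing the two sample estimators:
-
-``` sql
-SELECT varSamp(number), stddevSamp(number) FROM numbers(10)
-```
-
-The second value is the square root of the first.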
-
-## topK(N)(x) {#topknx}
-
-Returns an array of the approximately most frequent values in the specified column. The resulting array is sorted in descending order of approximate frequency of values (not by the values themselves).
-
-Implements the [Filtered Space-Saving](http://www.l2f.inesc-id.pt/~fmmb/wiki/uploads/Work/misnis.ref0a.pdf) algorithm for analyzing TopK, based on the reduce-and-combine algorithm from [Parallel Space Saving](https://arxiv.org/pdf/1401.0702.pdf).
-
-``` sql
-topK(N)(column)
-```
-
-This function does not provide a guaranteed result. In certain situations, errors might occur, and it might return frequent values that are not the most frequent values.
-
-We recommend using `N < 10`; performance is reduced with large `N` values. Maximum value of `N = 65536`.
-
-**Parameters**
-
-- ‘N’ is the number of elements to return.
-
-If the parameter is omitted, the default value 10 is used.
-
-**Arguments**
-
-- ' x ' – The value to calculate frequency.
-
-**Example**
-
-Take the [OnTime](../../getting-started/example-datasets/ontime.md) data set and select the three most frequently occurring values in the `AirlineID` column.
-
-``` sql
-SELECT topK(3)(AirlineID) AS res
-FROM ontime
-```
-
-``` text
-┌─res─────────────────┐
-│ [19393,19790,19805] │
-└─────────────────────┘
-```
-
-## topKWeighted {#topkweighted}
-
-Similar to `topK`, but takes one additional argument of integer type - `weight`. Every value is accounted `weight` times for the frequency calculation.
-
-**Syntax**
-
-``` sql
-topKWeighted(N)(x, weight)
-```
-
-**Parameters**
-
-- `N` — The number of elements to return.
-
-**Arguments**
-
-- `x` – The value.
-- `weight` — The weight. [UInt8](../../sql-reference/data-types/int-uint.md).
-
-**Returned value**
-
-Returns an array of the values with maximum approximate sum of weights.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT topKWeighted(10)(number, number) FROM numbers(1000)
-```
-
-Result:
-
-``` text
-┌─topKWeighted(10)(number, number)──────────┐
-│ [999,998,997,996,995,994,993,992,991,990] │
-└───────────────────────────────────────────┘
-```
-
-## covarSamp(x, y) {#covarsampx-y}
-
-Calculates the value of `Σ((x - x̅)(y - y̅)) / (n - 1)`.
-
-Returns Float64. When `n <= 1`, returns +∞.
-
-!!! note "Note"
-    This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the `covarSampStable` function. It works slower but provides a lower computational error.
-
-## covarPop(x, y) {#covarpopx-y}
-
-Calculates the value of `Σ((x - x̅)(y - y̅)) / n`.
-
-!!! note "Note"
-    This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the `covarPopStable` function. It works slower but provides a lower computational error.
-
-## corr(x, y) {#corrx-y}
-
-Calculates the Pearson correlation coefficient: `Σ((x - x̅)(y - y̅)) / sqrt(Σ((x - x̅)^2) * Σ((y - y̅)^2))`.
-
-!!! note "Note"
-    This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the `corrStable` function. It works slower but provides a lower computational error.
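-
-For illustration (an added sketch, not in the original reference), correlating a column with a linear function of itself yields a coefficient of 1 (up to floating-point error):
-
-``` sql
-SELECT corr(number, number * 2 + 1) FROM numbers(10)
-```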
-
-## categoricalInformationValue {#categoricalinformationvalue}
-
-Calculates the value of `(P(tag = 1) - P(tag = 0))(log(P(tag = 1)) - log(P(tag = 0)))` for each category.
-
-``` sql
-categoricalInformationValue(category1, category2, ..., tag)
-```
-
-The result indicates how a discrete (categorical) feature `[category1, category2, ...]` contributes to a learning model that predicts the value of `tag`.
-
-## simpleLinearRegression {#simplelinearregression}
-
-Performs simple (unidimensional) linear regression.
-
-``` sql
-simpleLinearRegression(x, y)
-```
-
-Parameters:
-
-- `x` — Column with dependent variable values.
-- `y` — Column with explanatory variable values.
-
-Returned values:
-
-Constants `(a, b)` of the resulting line `y = a*x + b`.
-
-**Examples**
-
-``` sql
-SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3])
-```
-
-``` text
-┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3])─┐
-│ (1,0)                                                             │
-└───────────────────────────────────────────────────────────────────┘
-```
-
-``` sql
-SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6])
-```
-
-``` text
-┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6])─┐
-│ (1,3)                                                             │
-└───────────────────────────────────────────────────────────────────┘
-```
-
-## stochasticLinearRegression {#agg_functions-stochasticlinearregression}
-
-This function implements stochastic linear regression. It supports custom parameters for the learning rate, L2 regularization coefficient and mini-batch size, and has a few methods for updating weights ([Adam](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam) (used by default), [simple SGD](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [Momentum](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), [Nesterov](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf)).
-
-### Parameters {#agg_functions-stochasticlinearregression-parameters}
-
-There are 4 customizable parameters. They are passed to the function sequentially, but there is no need to pass all four; default values will be used. However, a good model requires some parameter tuning.
-
-``` text
-stochasticLinearRegression(1.0, 1.0, 10, 'SGD')
-```
-
-1. `learning rate` is the coefficient on step length when the gradient descent step is performed. A learning rate that is too big may cause infinite weights of the model. Default is `0.00001`.
-2. `l2 regularization coefficient`, which may help to prevent overfitting. Default is `0.1`.
-3. `mini-batch size` sets the number of elements whose gradients will be computed and summed to perform one step of gradient descent. Pure stochastic descent uses one element; however, having small batches (about 10 elements) makes gradient steps more stable. Default is `15`.
-4. `method for updating weights`, they are: `Adam` (by default), `SGD`, `Momentum`, `Nesterov`. `Momentum` and `Nesterov` require a little bit more computations and memory; however, they happen to be useful in terms of convergence speed and stability of stochastic gradient methods.
-
-### Usage {#agg_functions-stochasticlinearregression-usage}
-
-`stochasticLinearRegression` is used in two steps: fitting the model and predicting on new data. In order to fit the model and save its state for later usage, we use the `-State` combinator, which basically saves the state (model weights, etc.).
-To predict, we use the function [evalMLMethod](../functions/machine-learning-functions.md#machine_learning_methods-evalmlmethod), which takes a state as an argument as well as features to predict on.
-
-**1.** Fitting
-
-Such a query may be used.
-
-``` sql
-CREATE TABLE IF NOT EXISTS train_data
-(
-    param1 Float64,
-    param2 Float64,
-    target Float64
-) ENGINE = Memory;
-
-CREATE TABLE your_model ENGINE = Memory AS SELECT
-stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2)
-AS state FROM train_data;
-```
-
-Here we also need to insert data into the `train_data` table. The number of parameters is not fixed; it depends only on the number of arguments passed into `linearRegressionState`. They all must be numeric values.
-Note that the column with the target value (which we would like to learn to predict) is inserted as the first argument.
-
-**2.** Predicting
-
-After saving a state into the table, we may use it multiple times for prediction, or even merge it with other states and create new, even better models.
-
-``` sql
-WITH (SELECT state FROM your_model) AS model SELECT
-evalMLMethod(model, param1, param2) FROM test_data
-```
-
-The query will return a column of predicted values. Note that the first argument of `evalMLMethod` is an `AggregateFunctionState` object; next are columns of features.
-
-`test_data` is a table like `train_data` but may not contain the target value.
-
-### Notes {#agg_functions-stochasticlinearregression-notes}
-
-1. To merge two models, the user may create such a query:
-   `sql SELECT state1 + state2 FROM your_models`
-   where the `your_models` table contains both models. This query will return a new `AggregateFunctionState` object.
-
-2. The user may fetch weights of the created model for its own purposes without saving the model if no `-State` combinator is used.
-   `sql SELECT stochasticLinearRegression(0.01)(target, param1, param2) FROM train_data`
-   Such a query will fit the model and return its weights: first are the weights that correspond to the parameters of the model, the last one is the bias. So, in the example above, the query will return a column with 3 values.
-
-**See Also**
-
-- [stochasticLogisticRegression](#agg_functions-stochasticlogisticregression)
-- [Difference between linear and logistic regressions](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression)
-
-## stochasticLogisticRegression {#agg_functions-stochasticlogisticregression}
-
-This function implements stochastic logistic regression. It can be used for binary classification problems, supports the same custom parameters as stochasticLinearRegression, and works the same way.
-
-### Parameters {#agg_functions-stochasticlogisticregression-parameters}
-
-The parameters are exactly the same as in stochasticLinearRegression:
-`learning rate`, `l2 regularization coefficient`, `mini-batch size`, `method for updating weights`.
-For more information see [parameters](#agg_functions-stochasticlinearregression-parameters).
-
-``` text
-stochasticLogisticRegression(1.0, 1.0, 10, 'SGD')
-```
-
-1. Fitting
-
-    See the `Fitting` section in the [stochasticLinearRegression](#stochasticlinearregression-usage-fitting) description.
-
-    Predicted labels have to be in \[-1, 1\].
-
-1. Predicting
-
-    Using the saved state we can predict the probability of an object having the label `1`.
-
-    ``` sql
-    WITH (SELECT state FROM your_model) AS model SELECT
-    evalMLMethod(model, param1, param2) FROM test_data
-    ```
-
-    The query will return a column of probabilities. Note that the first argument of `evalMLMethod` is an `AggregateFunctionState` object; next are columns of features.
-
-    We can also set a bound of probability, which assigns elements to different labels.
-
-    ``` sql
-    SELECT ans < 1.1 AND ans > 0.5 FROM
-    (WITH (SELECT state FROM your_model) AS model SELECT
-    evalMLMethod(model, param1, param2) AS ans FROM test_data)
-    ```
-
-    Then the result will be labels.
-
-    `test_data` is a table like `train_data` but may not contain the target value.
-
-**See Also**
-
-- [stochasticLinearRegression](#agg_functions-stochasticlinearregression)
-- [Difference between linear and logistic regressions.](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression)
-
-## groupBitmapAnd {#groupbitmapand}
-
-Computes the AND of a bitmap column and returns the cardinality as type UInt64; if the suffix -State is added, it returns a [bitmap object](../../sql-reference/functions/bitmap-functions.md).
-
-``` sql
-groupBitmapAnd(expr)
-```
-
-**Parameters**
-
-`expr` – An expression that results in `AggregateFunction(groupBitmap, UInt*)` type.
-
-**Return value**
-
-Value of the `UInt64` type.
-
-**Example**
-
-``` sql
-DROP TABLE IF EXISTS bitmap_column_expr_test2;
-CREATE TABLE bitmap_column_expr_test2
-(
-    tag_id String,
-    z AggregateFunction(groupBitmap, UInt32)
-)
-ENGINE = MergeTree
-ORDER BY tag_id;
-
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));
-
-SELECT groupBitmapAnd(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
-┌─groupBitmapAnd(z)─┐
-│                 3 │
-└───────────────────┘
-
-SELECT arraySort(bitmapToArray(groupBitmapAndState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
-┌─arraySort(bitmapToArray(groupBitmapAndState(z)))─┐
-│ [6,8,10]                                         │
-└──────────────────────────────────────────────────┘
-```
-
-## groupBitmapOr {#groupbitmapor}
-
-Computes the OR of a bitmap column and returns the cardinality as type UInt64; if the suffix -State is added, it returns a [bitmap object](../../sql-reference/functions/bitmap-functions.md). This is equivalent to `groupBitmapMerge`.
-
-``` sql
-groupBitmapOr(expr)
-```
-
-**Parameters**
-
-`expr` – An expression that results in `AggregateFunction(groupBitmap, UInt*)` type.
-
-**Return value**
-
-Value of the `UInt64` type.
-
-**Example**
-
-``` sql
-DROP TABLE IF EXISTS bitmap_column_expr_test2;
-CREATE TABLE bitmap_column_expr_test2
-(
-    tag_id String,
-    z AggregateFunction(groupBitmap, UInt32)
-)
-ENGINE = MergeTree
-ORDER BY tag_id;
-
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));
-
-SELECT groupBitmapOr(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
-┌─groupBitmapOr(z)─┐
-│               15 │
-└──────────────────┘
-
-SELECT arraySort(bitmapToArray(groupBitmapOrState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
-┌─arraySort(bitmapToArray(groupBitmapOrState(z)))─┐
-│ [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]           │
-└─────────────────────────────────────────────────┘
-```
-
-## groupBitmapXor {#groupbitmapxor}
-
-Computes the XOR of a bitmap column and returns the cardinality as type UInt64; if the suffix -State is added, it returns a [bitmap object](../../sql-reference/functions/bitmap-functions.md).
-
-``` sql
-groupBitmapXor(expr)
-```
-
-**Parameters**
-
-`expr` – An expression that results in `AggregateFunction(groupBitmap, UInt*)` type.
-
-**Return value**
-
-Value of the `UInt64` type.
-
-**Example**
-
-``` sql
-DROP TABLE IF EXISTS bitmap_column_expr_test2;
-CREATE TABLE bitmap_column_expr_test2
-(
-    tag_id String,
-    z AggregateFunction(groupBitmap, UInt32)
-)
-ENGINE = MergeTree
-ORDER BY tag_id;
-
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));
-
-SELECT groupBitmapXor(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
-┌─groupBitmapXor(z)─┐
-│                10 │
-└───────────────────┘
-
-SELECT arraySort(bitmapToArray(groupBitmapXorState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
-┌─arraySort(bitmapToArray(groupBitmapXorState(z)))─┐
-│ [1,3,5,6,8,10,11,13,14,15]                       │
-└──────────────────────────────────────────────────┘
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/reference/)
diff --git a/docs/es/sql-reference/ansi.md b/docs/es/sql-reference/ansi.md
deleted file mode 100644
index 29e2c5b12e9..00000000000
--- a/docs/es/sql-reference/ansi.md
+++ /dev/null
@@ -1,180 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: ad252bbb4f7e2899c448eb42ecc39ff195c8faa1
-toc_priority: 40
-toc_title: ANSI Compatibility
----
-
-# ANSI SQL Compatibility of the ClickHouse SQL Dialect {#ansi-sql-compatibility-of-clickhouse-sql-dialect}
-
-!!! note "Note"
    This article relies on Table 38, “Feature taxonomy and definition for mandatory features”, Annex F of ISO/IEC CD 9075-2:2013.
-
-## Differences in Behaviour {#differences-in-behaviour}
-
-The following table lists cases where a query feature works in ClickHouse but does not behave as specified in ANSI SQL.
-
-| Feature ID | Feature Name                   | Difference                                                                                                  |
-|------------|--------------------------------|-------------------------------------------------------------------------------------------------------------|
-| E011       | Numeric data types             | Numeric literal with a period is interpreted as approximate (`Float64`) instead of exact (`Decimal`)         |
-| E051-05    | Select items can be renamed    | Item renames have a wider visibility scope than just the SELECT result                                       |
-| E141-01    | NOT NULL constraints           | `NOT NULL` is implied for table columns by default                                                           |
-| E011-04    | Arithmetic operators           | ClickHouse overflows instead of checked arithmetic, and changes the result data type based on custom rules   |
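-
-For illustration (an added sketch, not part of the original article), the E011 difference is easy to observe:
-
-``` sql
-SELECT toTypeName(1.0)
-```
-
-ClickHouse reports `Float64`, whereas ANSI SQL would treat `1.0` as an exact decimal literal.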
-
-## Feature Status {#feature-status}
-
-| Feature ID | Feature Name                                                                                                 | Status                     | Comment                                                                                                                                                                   |
-|------------|--------------------------------------------------------------------------------------------------------------|----------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| **E011**   | **Numeric data types**                                                                                         | **Partial**{.text-warning} | |
-| E011-01    | INTEGER and SMALLINT data types                                                                                | Yes {.text-success}        | |
-| E011-02    | REAL, DOUBLE PRECISION and FLOAT data types                                                                    | Partial {.text-warning}    | `FLOAT()`, `REAL` and `DOUBLE PRECISION` are not supported |
-| E011-03    | DECIMAL and NUMERIC data types                                                                                 | Partial {.text-warning}    | Only `DECIMAL(p,s)` is supported, not `NUMERIC` |
-| E011-04    | Arithmetic operators                                                                                           | Yes {.text-success}        | |
-| E011-05    | Numeric comparison                                                                                             | Yes {.text-success}        | |
-| E011-06    | Implicit casting among the numeric data types                                                                  | No {.text-danger}          | ANSI SQL allows arbitrary implicit cast between numeric types, while ClickHouse relies on functions having multiple overloads instead of implicit cast |
-| **E021**   | **Character string types**                                                                                     | **Partial**{.text-warning} | |
-| E021-01    | CHARACTER data type                                                                                            | No {.text-danger}          | |
-| E021-02    | CHARACTER VARYING data type                                                                                    | No {.text-danger}          | `String` behaves similarly, but without length limit in parentheses |
-| E021-03    | Character literals                                                                                             | Partial {.text-warning}    | No automatic concatenation of consecutive literals and character set support |
-| E021-04    | CHARACTER_LENGTH function                                                                                      | Partial {.text-warning}    | No `USING` clause |
-| E021-05    | OCTET_LENGTH function                                                                                          | No {.text-danger}          | `LENGTH` behaves similarly |
-| E021-06    | SUBSTRING                                                                                                      | Partial {.text-warning}    | No support for `SIMILAR` and `ESCAPE` clauses, no `SUBSTRING_REGEX` variant |
-| E021-07    | Character concatenation                                                                                        | Partial {.text-warning}    | No `COLLATE` clause |
-| E021-08    | UPPER and LOWER functions                                                                                      | Yes {.text-success}        | |
-| E021-09    | TRIM function                                                                                                  | Yes {.text-success}        | |
-| E021-10    | Implicit casting among the fixed-length and variable-length character string types                             | No {.text-danger}          | ANSI SQL allows arbitrary implicit cast between string types, while ClickHouse relies on functions having multiple overloads instead of implicit cast |
-| E021-11    | POSITION function                                                                                              | Partial {.text-warning}    | No support for `IN` and `USING` clauses, no `POSITION_REGEX` variant |
-| E021-12    | Character comparison                                                                                           | Yes {.text-success}        | |
-| **E031**   | **Identifiers**                                                                                                | **Partial**{.text-warning} | |
-| E031-01    | Delimited identifiers                                                                                          | Partial {.text-warning}    | Unicode literal support is limited |
-| E031-02    | Lower case identifiers                                                                                         | Yes {.text-success}        | |
-| E031-03    | Trailing underscore                                                                                            | Yes {.text-success}        | |
-| **E051**   | **Basic query specification**                                                                                  | **Partial**{.text-warning} | |
-| E051-01    | SELECT DISTINCT                                                                                                | Yes {.text-success}        | |
-| E051-02    | GROUP BY clause                                                                                                | Yes {.text-success}        | |
-| E051-04    | GROUP BY can contain columns not in `<select list>`                                                            | Yes {.text-success}        | |
-| E051-05    | Select items can be renamed                                                                                    | Yes {.text-success}        | |
-| E051-06    | HAVING clause                                                                                                  | Yes {.text-success}        | |
-| E051-07    | Qualified \* in select list                                                                                    | Yes {.text-success}        | |
-| E051-08    | Correlation name in the FROM clause                                                                            | Yes {.text-success}        | |
-| E051-09    | Rename columns in the FROM clause                                                                              | No {.text-danger}          | |
-| **E061**   | **Basic predicates and search conditions**                                                                     | **Partial**{.text-warning} | |
-| E061-01    | Comparison predicate                                                                                           | Yes {.text-success}        | |
-| E061-02    | BETWEEN predicate                                                                                              | Partial {.text-warning}    | No `SYMMETRIC` and `ASYMMETRIC` clause |
-| E061-03    | IN predicate with list of values                                                                               | Yes {.text-success}        | |
-| E061-04    | LIKE predicate                                                                                                 | Yes {.text-success}        | |
-| E061-05    | LIKE predicate: ESCAPE clause                                                                                  | No {.text-danger}          | |
-| E061-06    | NULL predicate                                                                                                 | Yes {.text-success}        | |
-| E061-07    | Quantified comparison predicate                                                                                | No {.text-danger}          | |
-| E061-08    | EXISTS predicate                                                                                               | No {.text-danger}          | |
-| E061-09    | Subqueries in comparison predicate                                                                             | Yes {.text-success}        | |
-| E061-11    | Subqueries in IN predicate                                                                                     | Yes {.text-success}        | |
-| E061-12    | Subqueries in quantified comparison predicate                                                                  | No {.text-danger}          | |
-| E061-13    | Correlated subqueries                                                                                          | No {.text-danger}          | |
-| E061-14    | Search condition                                                                                               | Yes {.text-success}        | |
-| **E071**   | **Basic query expressions**                                                                                    | **Partial**{.text-warning} | |
-| E071-01    | UNION DISTINCT table operator                                                                                  | No {.text-danger}          | |
-| E071-02    | UNION ALL table operator                                                                                       | Yes {.text-success}        | |
-| E071-03    | EXCEPT DISTINCT table operator                                                                                 | No {.text-danger}          | |
-| E071-05    | Columns combined via table operators need not have exactly the same data type                                  | Yes {.text-success}        | |
-| E071-06    | Table operators in subqueries                                                                                  | Yes {.text-success}        | |
-| **E081**   | **Basic privileges**                                                                                           | **Partial**{.text-warning} | Work in progress |
-| **E091**   | **Set functions**                                                                                              | **Yes**{.text-success}     | |
-| E091-01    | AVG                                                                                                            | Yes {.text-success}        | |
-| E091-02    | COUNT                                                                                                          | Yes {.text-success}        | |
-| E091-03    | MAX                                                                                                            | Yes {.text-success}        | |
-| E091-04    | MIN                                                                                                            | Yes {.text-success}        | |
-| E091-05    | SUM                                                                                                            | Yes {.text-success}        | |
-| E091-06    | ALL quantifier                                                                                                 | No {.text-danger}          | |
-| E091-07    | DISTINCT quantifier                                                                                            | Partial {.text-warning}    | Not all aggregate functions supported |
-| **E101**   | **Basic data manipulation**                                                                                    | **Partial**{.text-warning} | |
-| E101-01    | INSERT statement                                                                                               | Yes {.text-success}        | Note: the primary key in ClickHouse does not imply the `UNIQUE` constraint |
-| E101-03    | Searched UPDATE statement                                                                                      | No {.text-danger}          | There is an `ALTER UPDATE` statement for batch data modification |
-| E101-04    | Searched DELETE statement                                                                                      | No {.text-danger}          | There is an `ALTER DELETE` statement for batch data removal |
-| **E111**   | **Single row SELECT statement**                                                                                | **No**{.text-danger}       | |
-| **E121**   | **Basic cursor support**                                                                                       | **No**{.text-danger}       | |
-| E121-01    | DECLARE CURSOR                                                                                                 | No {.text-danger}          | |
-| E121-02    | ORDER BY columns need not be in select list                                                                    | No {.text-danger}          | |
-| E121-03    | Value expressions in ORDER BY clause                                                                           | No {.text-danger}          | |
-| E121-04    | OPEN statement                                                                                                 | No {.text-danger}          | |
-| E121-06    | Positioned UPDATE statement                                                                                    | No {.text-danger}          | |
-| E121-07    | Positioned DELETE statement                                                                                    | No {.text-danger}          | |
-| E121-08    | CLOSE statement                                                                                                | No {.text-danger}          | |
-| E121-10    | FETCH statement: implicit NEXT                                                                                 | No {.text-danger}          | |
-| E121-17    | WITH HOLD cursors                                                                                              | No {.text-danger}          | |
-| **E131**   | **Null value support (nulls in lieu of values)**                                                               | **Partial**{.text-warning} | Some restrictions apply |
-| **E141**   | **Basic integrity constraints**                                                                                | **Partial**{.text-warning} | |
-| E141-01    | NOT NULL constraints                                                                                           | Yes {.text-success}        | Note: `NOT NULL` is implied for table columns by default |
-| E141-02    | UNIQUE constraint of NOT NULL columns                                                                          | No {.text-danger}          | |
-| E141-03    | PRIMARY KEY constraints                                                                                        | No {.text-danger}          | |
-| E141-04    | Basic FOREIGN KEY constraint with the NO ACTION default for both referential delete action and referential update action | No {.text-danger} | |
-| E141-06    | CHECK constraint                                                                                               | Yes {.text-success}        | |
-| E141-07    | Column defaults                                                                                                | Yes {.text-success}        | |
-| E141-08    | NOT NULL inferred on PRIMARY KEY                                                                               | Yes {.text-success}        | |
-| E141-10    | Names in a foreign key can be specified in any order                                                           | No {.text-danger}          | |
-| **E151**   | **Transaction support**                                                                                        | **No**{.text-danger}       | |
-| E151-01    | COMMIT statement                                                                                               | No {.text-danger}          | |
-| E151-02    | ROLLBACK statement                                                                                             | No {.text-danger}          | |
-| **E152**   | **Basic SET TRANSACTION statement**                                                                            | **No**{.text-danger}       | |
-| E152-01    | SET TRANSACTION statement: ISOLATION LEVEL SERIALIZABLE clause                                                 | No {.text-danger}          | |
-| E152-02    | SET TRANSACTION statement: READ ONLY and READ WRITE clauses                                                    | No {.text-danger}          | |
-| **E153**   | **Updatable queries with subqueries**                                                                          | **No**{.text-danger}       | |
-| **E161**   | **SQL comments using leading double minus**                                                                    | **Yes**{.text-success}     | |
-| **E171**   | **SQLSTATE support**                                                                                           | **No**{.text-danger}       | |
-| **E182**   | **Host language binding**                                                                                      | **No**{.text-danger}       | |
-| **F031**   | **Basic schema manipulation**                                                                                  | **Partial**{.text-warning} | |
-| F031-01    | CREATE TABLE statement to create persistent base tables                                                        | Partial {.text-warning}    | No `SYSTEM VERSIONING`, `ON COMMIT`, `GLOBAL`, `LOCAL`, `PRESERVE`, `DELETE`, `REF IS`, `WITH OPTIONS`, `UNDER`, `LIKE`, `PERIOD FOR` clauses and no support for user-resolved data types |
-| F031-02    | CREATE VIEW statement                                                                                          | Partial {.text-warning}    | No `RECURSIVE`, `CHECK`, `UNDER`, `WITH OPTIONS` clauses and no support for user-resolved data types |
-| F031-03    | GRANT statement                                                                                                | Yes {.text-success}        | |
-| F031-04    | ALTER TABLE statement: ADD COLUMN clause                                                                       | Partial {.text-warning}    | No support for `GENERATED` clause and system time period |
-| F031-13    | DROP TABLE statement: RESTRICT clause                                                                          | No {.text-danger}          | |
-| F031-16    | DROP VIEW statement: RESTRICT clause                                                                           | No {.text-danger}          | |
-| F031-19    | REVOKE statement: RESTRICT clause                                                                              | No {.text-danger}          | |
-| **F041**   | **Basic joined table**                                                                                         | **Partial**{.text-warning} | |
-| F041-01    | Inner join (but not necessarily the INNER keyword)                                                             | Yes {.text-success}        | |
-| F041-02    | INNER keyword                                                                                                  | Yes {.text-success}        | |
-| F041-03    | LEFT OUTER JOIN                                                                                                | Yes {.text-success}        | |
-| F041-04    | RIGHT OUTER JOIN                                                                                               | Yes {.text-success}        | |
-| F041-05    | Outer joins can be nested                                                                                      | Yes {.text-success}        | |
-| F041-07    | The inner table in a left or right outer join can also be used in an inner join                                | Yes {.text-success}        | |
-| F041-08    | All comparison operators are supported (rather than just =)                                                    | No {.text-danger}          | |
-| **F051**   | **Basic date and time**                                                                                        | **Partial**{.text-warning} | |
-| F051-01    | DATE data type (including support of DATE literal)                                                             | Partial {.text-warning}    | No literal |
-| F051-02    | TIME data type (including support of TIME literal) with fractional seconds precision of at least 0             | No {.text-danger}          | |
-| F051-03    | TIMESTAMP data type (including support of TIMESTAMP literal) with fractional seconds precision of at least 0 and 6 | No {.text-danger}     | `DateTime64` provides similar functionality |
-| F051-04    | Comparison predicate on DATE, TIME, and TIMESTAMP data types                                                   | Partial {.text-warning}    | Only one data type available |
-| F051-05    | Explicit CAST between datetime types and character string types                                                | Yes {.text-success}        | |
-| F051-06    | CURRENT_DATE                                                                                                   | No {.text-danger}          | `today()` is similar |
-| F051-07    | LOCALTIME                                                                                                      | No {.text-danger}          | `now()` is similar |
-| F051-08    | LOCALTIMESTAMP                                                                                                 | No {.text-danger}          | |
-| **F081**   | **UNION and EXCEPT in views**                                                                                  | **Partial**{.text-warning} | |
-| **F131**   | **Grouped operations**                                                                                         | **Partial**{.text-warning} | |
-| F131-01    | WHERE, GROUP BY, and HAVING clauses supported in queries with grouped views                                    | Yes {.text-success}        | |
-| F131-02    | Multiple tables supported in queries with grouped views                                                        | Yes {.text-success}        | |
-| F131-03    | Set functions supported in queries with grouped views                                                          | Yes {.text-success}        | |
-| F131-04    | Subqueries with GROUP BY and HAVING clauses and grouped views                                                  | Yes {.text-success}        | |
-| F131-05    | Single row SELECT with GROUP BY and HAVING clauses and grouped views                                           | No {.text-danger}          | |
-| **F181**   | **Multiple module support**                                                                                    | **No**{.text-danger}       | |
-| **F201**   | **CAST function**                                                                                              | **Yes**{.text-success}     | |
-| **F221**   | **Explicit defaults**                                                                                          | **No**{.text-danger}       | |
-| **F261**   | **CASE expression**                                                                                            | **Yes**{.text-success}     | |
-| F261-01    | Simple CASE                                                                                                    | Yes {.text-success}        | |
-| F261-02    | Searched CASE                                                                                                  | Yes {.text-success}        | |
-| F261-03    | NULLIF                                                                                                         | Yes {.text-success}        | |
-| F261-04    | COALESCE                                                                                                       | Yes {.text-success}        | |
-| **F311**   | **Schema definition statement**                                                                                | **Partial**{.text-warning} | |
-| F311-01    | CREATE SCHEMA                                                                                                  | No {.text-danger}          | |
-| F311-02    | CREATE TABLE for persistent base tables                                                                        | Yes {.text-success}        | |
-| F311-03    | CREATE VIEW                                                                                                    | Yes {.text-success}        | |
-| F311-04    | CREATE VIEW: WITH CHECK OPTION                                                                                 | No {.text-danger}          | |
-| F311-05    | GRANT statement                                                                                                | Yes {.text-success}        | |
-| **F471**   | **Scalar subquery values**                                                                                     | **Yes**{.text-success}     | |
-| **F481**   | **Expanded NULL predicate**                                                                                    | **Yes**{.text-success}     | |
-| **F812**   | **Basic flagging**                                                                                             | **No**{.text-danger}       | |
-| **T321**   | **Basic SQL-invoked routines**                                                                                 | **No**{.text-danger}       | |
-| T321-01    | User-defined functions with no overloading                                                                     | No {.text-danger}          | |
-| T321-02    | User-defined stored procedures with no overloading                                                             | No {.text-danger}          | |
-| T321-03    | Function invocation                                                                                            | No {.text-danger}          | |
-| T321-04    | CALL statement                                                                                                 | No {.text-danger}          | |
-| T321-05    | RETURN statement                                                                                               | No {.text-danger}          | |
-| **T631**   | **IN predicate with one list element**                                                                         | **Yes**{.text-success}     | |
diff --git a/docs/fr/sql-reference/data-types/aggregatefunction.md b/docs/fr/sql-reference/data-types/aggregatefunction.md
deleted file mode 100644
index 18874cd3cb7..00000000000
--- a/docs/fr/sql-reference/data-types/aggregatefunction.md
+++ /dev/null
@@ -1,70 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 52
-toc_title: AggregateFunction (name, types_of_arguments...)
----
-
-# AggregateFunction(name, types_of_arguments…) {#data-type-aggregatefunction}
-
-Aggregate functions can have an implementation-defined intermediate state that can be serialized to an AggregateFunction(…) data type and stored in a table, usually, by means of [a materialized view](../../sql-reference/statements/create.md#create-view). The common way to produce an aggregate function state is by calling the aggregate function with the `-State` suffix. To get the final result of aggregation in the future, you must use the same aggregate function with the `-Merge` suffix.
-
-`AggregateFunction` — parametric data type.
-
-**Parameters**
-
-- Name of the aggregate function.
-
-    If the function is parametric, specify its parameters too.
-
-- Types of the aggregate function arguments.
-
-**Example**
-
-``` sql
-CREATE TABLE t
-(
-    column1 AggregateFunction(uniq, UInt64),
-    column2 AggregateFunction(anyIf, String, UInt8),
-    column3 AggregateFunction(quantiles(0.5, 0.9), UInt64)
-) ENGINE = ...
-```
-
-[uniq](../../sql-reference/aggregate-functions/reference.md#agg_function-uniq), anyIf ([any](../../sql-reference/aggregate-functions/reference.md#agg_function-any)+[If](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-if)) and [quantiles](../../sql-reference/aggregate-functions/reference.md) are the aggregate functions supported in ClickHouse.
-
-## Usage {#usage}
-
-### Data Insertion {#data-insertion}
-
-To insert data, use `INSERT SELECT` with aggregate `-State`- functions.
-
-**Function examples**
-
-``` sql
-uniqState(UserID)
-quantilesState(0.5, 0.9)(SendTiming)
-```
-
-In contrast to the corresponding functions `uniq` and `quantiles`, `-State`- functions return the state instead of the final value. In other words, they return a value of the `AggregateFunction` type.
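-
-As a sketch (added here, not from the original page; it assumes a hypothetical source table `events` with `UserID`, `Name`, `Flag` and `SendTiming` columns of matching types), inserting states into the table `t` defined above could look like:
-
-``` sql
-INSERT INTO t
-SELECT
-    uniqState(UserID),
-    anyIfState(Name, Flag),
-    quantilesState(0.5, 0.9)(SendTiming)
-FROM events
-```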
-
-In the results of a `SELECT` query, the values of the `AggregateFunction` type have an implementation-specific binary representation for all of the ClickHouse output formats. If you dump data into, for example, the `TabSeparated` format with a `SELECT` query, then this dump can be loaded back using an `INSERT` query.
-
-### Data Selection {#data-selection}
-
-When selecting data from an `AggregatingMergeTree` table, use the `GROUP BY` clause and the same aggregate functions as when inserting data, but with the `-Merge` suffix.
-
-An aggregate function with the `-Merge` suffix takes a set of states, combines them, and returns the result of complete data aggregation.
-
-For example, the following two queries return the same result:
-
-``` sql
-SELECT uniq(UserID) FROM table
-
-SELECT uniqMerge(state) FROM (SELECT uniqState(UserID) AS state FROM table GROUP BY RegionID)
-```
-
-## Usage Example {#usage-example}
-
-See the [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md) engine description.
-
-[Original article](https://clickhouse.tech/docs/en/data_types/nested_data_structures/aggregatefunction/)
diff --git a/docs/fr/sql-reference/data-types/array.md b/docs/fr/sql-reference/data-types/array.md
deleted file mode 100644
index 41772cab177..00000000000
--- a/docs/fr/sql-reference/data-types/array.md
+++ /dev/null
@@ -1,77 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 51
-toc_title: Array(T)
----
-
-# Array(T) {#data-type-array}
-
-An array of `T`-type items. `T` can be any data type, including an array.
-
-## Creating an Array {#creating-an-array}
-
-You can use a function to create an array:
-
-``` sql
-array(T)
-```
-
-You can also use square brackets.
-
-``` sql
-[]
-```
-
-Example of creating an array:
-
-``` sql
-SELECT array(1, 2) AS x, toTypeName(x)
-```
-
-``` text
-┌─x─────┬─toTypeName(array(1, 2))─┐
-│ [1,2] │ Array(UInt8)            │
-└───────┴─────────────────────────┘
-```
-
-``` sql
-SELECT [1, 2] AS x, toTypeName(x)
-```
-
-``` text
-┌─x─────┬─toTypeName([1, 2])─┐
-│ [1,2] │ Array(UInt8)       │
-└───────┴────────────────────┘
-```
-
-## Working with Data Types {#working-with-data-types}
-
-When creating an array on the fly, ClickHouse automatically defines the argument type as the narrowest data type that can store all the listed arguments. If there are any [Nullable](nullable.md#data_type-nullable) or literal [NULL](../../sql-reference/syntax.md#null-literal) values, the type of an array element also becomes [Nullable](nullable.md).
-
-If ClickHouse could not determine the data type, it generates an exception. For instance, this happens when trying to create an array with strings and numbers simultaneously (`SELECT array(1, 'a')`).
-
-Examples of automatic data type detection:
-
-``` sql
-SELECT array(1, 2, NULL) AS x, toTypeName(x)
-```
-
-``` text
-┌─x──────────┬─toTypeName(array(1, 2, NULL))─┐
-│ [1,2,NULL] │ Array(Nullable(UInt8))        │
-└────────────┴───────────────────────────────┘
-```
-
-If you try to create an array of incompatible data types, ClickHouse throws an exception:
-
-``` sql
-SELECT array(1, 'a')
-```
-
-``` text
-Received exception from server (version 1.1.54388):
-Code: 386. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: There is no supertype for types UInt8, String because some of them are String/FixedString and some of them are not.
DB::Exception: There is no supertype for types UInt8, String because some of them are String/FixedString and some of them are not. -``` - -[Article Original](https://clickhouse.tech/docs/en/data_types/array/) diff --git a/docs/fr/sql-reference/data-types/boolean.md b/docs/fr/sql-reference/data-types/boolean.md deleted file mode 100644 index aeb84cf1cc1..00000000000 --- a/docs/fr/sql-reference/data-types/boolean.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 43 -toc_title: "Bool\xE9en" ---- - -# Les Valeurs Booléennes {#boolean-values} - -Il n'y a pas de type distinct pour les valeurs booléennes. Utilisez le type UInt8, limité aux valeurs 0 ou 1. - -[Article Original](https://clickhouse.tech/docs/en/data_types/boolean/) diff --git a/docs/fr/sql-reference/data-types/date.md b/docs/fr/sql-reference/data-types/date.md deleted file mode 100644 index 698639f1d2f..00000000000 --- a/docs/fr/sql-reference/data-types/date.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 47 -toc_title: Date ---- - -# Date {#date} - -Date. Stocké en deux octets comme le nombre de jours depuis 1970-01-01 (non signé). Permet de stocker des valeurs juste après le début de L'époque Unix jusqu'au seuil supérieur défini par une constante au stade de la compilation (actuellement, c'est jusqu'à l'année 2106, mais l'année finale entièrement prise en charge est 2105). - -La valeur de date est stockée sans le fuseau horaire. - -[Article Original](https://clickhouse.tech/docs/en/data_types/date/) diff --git a/docs/fr/sql-reference/data-types/datetime.md b/docs/fr/sql-reference/data-types/datetime.md deleted file mode 100644 index 915270e4d2b..00000000000 --- a/docs/fr/sql-reference/data-types/datetime.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 48 -toc_title: DateTime ---- - -# Datetime {#data_type-datetime} - -Permet de stocker un instant dans le temps, qui peut être exprimé comme une date de calendrier et une heure d'une journée. - -Syntaxe: - -``` sql -DateTime([timezone]) -``` - -Plage de valeurs prise en charge: \[1970-01-01 00:00:00, 2105-12-31 23:59:59\]. - -Résolution: 1 seconde. - -## Utilisation Remarques {#usage-remarks} - -Le point dans le temps est enregistré en tant que [Le timestamp Unix](https://en.wikipedia.org/wiki/Unix_time), quel que soit le fuseau horaire ou l'heure d'été. En outre, l' `DateTime` type peut stocker le fuseau horaire qui est le même pour la colonne entière, qui affecte la façon dont les valeurs de la `DateTime` les valeurs de type sont affichées au format texte et comment les valeurs spécifiées en tant que chaînes sont analysées (‘2020-01-01 05:00:01’). Le fuseau horaire n'est pas stocké dans les lignes de la table (ou dans resultset), mais est stocké dans les métadonnées de la colonne. -Une liste des fuseaux horaires pris en charge peut être trouvée dans le [Base de données de fuseau horaire IANA](https://www.iana.org/time-zones). -Le `tzdata` paquet, contenant [Base de données de fuseau horaire IANA](https://www.iana.org/time-zones), doit être installé dans le système. L'utilisation de la `timedatectl list-timezones` commande pour lister les fuseaux horaires connus par un système local. 
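As a quick check of which timezone the server will apply to `DateTime` columns declared without an explicit one (the output naturally depends on the server configuration):

``` sql
-- Returns the server's configured timezone, e.g. 'Europe/Moscow'.
SELECT timezone()
```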
- -Vous pouvez définir explicitement un fuseau horaire `DateTime`- tapez des colonnes lors de la création d'une table. Si le fuseau horaire n'est pas défini, ClickHouse utilise la valeur [fuseau](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) paramètre dans les paramètres du serveur ou les paramètres du système d'exploitation au moment du démarrage du serveur ClickHouse. - -Le [clickhouse-client](../../interfaces/cli.md) applique le fuseau horaire du serveur par défaut si un fuseau horaire n'est pas explicitement défini lors de l'initialisation du type de données. Pour utiliser le fuseau horaire du client, exécutez `clickhouse-client` avec l' `--use_client_time_zone` paramètre. - -Clickhouse affiche les valeurs dans `YYYY-MM-DD hh:mm:ss` format de texte par défaut. Vous pouvez modifier la sortie avec le [formatDateTime](../../sql-reference/functions/date-time-functions.md#formatdatetime) fonction. - -Lorsque vous insérez des données dans ClickHouse, vous pouvez utiliser différents formats de chaînes de date et d'heure, en fonction de la valeur du [date_time_input_format](../../operations/settings/settings.md#settings-date_time_input_format) paramètre. - -## Exemple {#examples} - -**1.** Création d'une table avec un `DateTime`- tapez la colonne et insérez des données dedans: - -``` sql -CREATE TABLE dt -( - `timestamp` DateTime('Europe/Moscow'), - `event_id` UInt8 -) -ENGINE = TinyLog; -``` - -``` sql -INSERT INTO dt Values (1546300800, 1), ('2019-01-01 00:00:00', 2); -``` - -``` sql -SELECT * FROM dt; -``` - -``` text -┌───────────timestamp─┬─event_id─┐ -│ 2019-01-01 03:00:00 │ 1 │ -│ 2019-01-01 00:00:00 │ 2 │ -└─────────────────────┴──────────┘ -``` - -- Lors de l'insertion de datetime en tant qu'entier, il est traité comme un horodatage Unix (UTC). `1546300800` représenter `'2019-01-01 00:00:00'` L'UTC. Cependant, comme `timestamp` la colonne a `Europe/Moscow` (UTC+3) fuseau horaire spécifié, lors de la sortie en tant que chaîne, la valeur sera affichée comme `'2019-01-01 03:00:00'` -- Lors de l'insertion d'une valeur de chaîne en tant que datetime, elle est traitée comme étant dans le fuseau horaire de la colonne. `'2019-01-01 00:00:00'` sera considérée comme étant en `Europe/Moscow` fuseau horaire et enregistré sous `1546290000`. - -**2.** Le filtrage sur `DateTime` valeur - -``` sql -SELECT * FROM dt WHERE timestamp = toDateTime('2019-01-01 00:00:00', 'Europe/Moscow') -``` - -``` text -┌───────────timestamp─┬─event_id─┐ -│ 2019-01-01 00:00:00 │ 2 │ -└─────────────────────┴──────────┘ -``` - -`DateTime` les valeurs de colonne peuvent être filtrées à l'aide d'une `WHERE` prédicat. 
Elle sera convertie `DateTime` automatiquement: - -``` sql -SELECT * FROM dt WHERE timestamp = '2019-01-01 00:00:00' -``` - -``` text -┌───────────timestamp─┬─event_id─┐ -│ 2019-01-01 03:00:00 │ 1 │ -└─────────────────────┴──────────┘ -``` - -**3.** Obtenir un fuseau horaire pour un `DateTime`colonne de type: - -``` sql -SELECT toDateTime(now(), 'Europe/Moscow') AS column, toTypeName(column) AS x -``` - -``` text -┌──────────────column─┬─x─────────────────────────┐ -│ 2019-10-16 04:12:04 │ DateTime('Europe/Moscow') │ -└─────────────────────┴───────────────────────────┘ -``` - -**4.** Conversion de fuseau horaire - -``` sql -SELECT -toDateTime(timestamp, 'Europe/London') as lon_time, -toDateTime(timestamp, 'Europe/Moscow') as mos_time -FROM dt -``` - -``` text -┌───────────lon_time──┬────────────mos_time─┐ -│ 2019-01-01 00:00:00 │ 2019-01-01 03:00:00 │ -│ 2018-12-31 21:00:00 │ 2019-01-01 00:00:00 │ -└─────────────────────┴─────────────────────┘ -``` - -## Voir Aussi {#see-also} - -- [Fonctions de conversion de Type](../../sql-reference/functions/type-conversion-functions.md) -- [Fonctions pour travailler avec des dates et des heures](../../sql-reference/functions/date-time-functions.md) -- [Fonctions pour travailler avec des tableaux](../../sql-reference/functions/array-functions.md) -- [Le `date_time_input_format` paramètre](../../operations/settings/settings.md#settings-date_time_input_format) -- [Le `timezone` paramètre de configuration du serveur](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) -- [Opérateurs pour travailler avec des dates et des heures](../../sql-reference/operators/index.md#operators-datetime) -- [Le `Date` type de données](date.md) - -[Article Original](https://clickhouse.tech/docs/en/data_types/datetime/) diff --git a/docs/fr/sql-reference/data-types/datetime64.md b/docs/fr/sql-reference/data-types/datetime64.md deleted file mode 100644 index 027891c595d..00000000000 --- a/docs/fr/sql-reference/data-types/datetime64.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 49 -toc_title: DateTime64 ---- - -# Datetime64 {#data_type-datetime64} - -Permet de stocker un instant dans le temps, qui peut être exprimé comme une date de calendrier et une heure d'un jour, avec une précision de sous-seconde définie - -Tick taille (précision): 10-précision deuxième - -Syntaxe: - -``` sql -DateTime64(precision, [timezone]) -``` - -En interne, stocke les données comme un certain nombre de ‘ticks’ depuis le début de l'époque (1970-01-01 00: 00: 00 UTC) comme Int64. La résolution des tiques est déterminée par le paramètre de précision. En outre, l' `DateTime64` type peut stocker le fuseau horaire qui est le même pour la colonne entière, qui affecte la façon dont les valeurs de la `DateTime64` les valeurs de type sont affichées au format texte et comment les valeurs spécifiées en tant que chaînes sont analysées (‘2020-01-01 05:00:01.000’). Le fuseau horaire n'est pas stocké dans les lignes de la table (ou dans resultset), mais est stocké dans les métadonnées de la colonne. Voir les détails dans [DateTime](datetime.md). 
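A small sketch of what the precision argument means in practice: with precision 3 the tick size is 10^-3 seconds, so fractional digits beyond milliseconds are not kept.

``` sql
-- Microsecond digits beyond the declared precision are dropped.
SELECT toDateTime64('2019-01-01 00:00:00.123456', 3) AS ms
-- ms = 2019-01-01 00:00:00.123
```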
- -## Exemple {#examples} - -**1.** Création d'une table avec `DateTime64`- tapez la colonne et insérez des données dedans: - -``` sql -CREATE TABLE dt -( - `timestamp` DateTime64(3, 'Europe/Moscow'), - `event_id` UInt8 -) -ENGINE = TinyLog -``` - -``` sql -INSERT INTO dt Values (1546300800000, 1), ('2019-01-01 00:00:00', 2) -``` - -``` sql -SELECT * FROM dt -``` - -``` text -┌───────────────timestamp─┬─event_id─┐ -│ 2019-01-01 03:00:00.000 │ 1 │ -│ 2019-01-01 00:00:00.000 │ 2 │ -└─────────────────────────┴──────────┘ -``` - -- Lors de l'insertion de datetime en tant qu'entier, il est traité comme un horodatage Unix (UTC) mis à l'échelle de manière appropriée. `1546300800000` (avec précision 3) représente `'2019-01-01 00:00:00'` L'UTC. Cependant, comme `timestamp` la colonne a `Europe/Moscow` (UTC+3) fuseau horaire spécifié, lors de la sortie sous forme de chaîne, la valeur sera affichée comme `'2019-01-01 03:00:00'` -- Lors de l'insertion d'une valeur de chaîne en tant que datetime, elle est traitée comme étant dans le fuseau horaire de la colonne. `'2019-01-01 00:00:00'` sera considérée comme étant en `Europe/Moscow` fuseau horaire et stocké comme `1546290000000`. - -**2.** Le filtrage sur `DateTime64` valeur - -``` sql -SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europe/Moscow') -``` - -``` text -┌───────────────timestamp─┬─event_id─┐ -│ 2019-01-01 00:00:00.000 │ 2 │ -└─────────────────────────┴──────────┘ -``` - -Contrairement `DateTime`, `DateTime64` les valeurs ne sont pas converties depuis `String` automatiquement - -**3.** Obtenir un fuseau horaire pour un `DateTime64`-le type de la valeur: - -``` sql -SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS x -``` - -``` text -┌──────────────────column─┬─x──────────────────────────────┐ -│ 2019-10-16 04:12:04.000 │ DateTime64(3, 'Europe/Moscow') │ -└─────────────────────────┴────────────────────────────────┘ -``` - -**4.** Conversion de fuseau horaire - -``` sql -SELECT -toDateTime64(timestamp, 3, 'Europe/London') as lon_time, -toDateTime64(timestamp, 3, 'Europe/Moscow') as mos_time -FROM dt -``` - -``` text -┌───────────────lon_time──┬────────────────mos_time─┐ -│ 2019-01-01 00:00:00.000 │ 2019-01-01 03:00:00.000 │ -│ 2018-12-31 21:00:00.000 │ 2019-01-01 00:00:00.000 │ -└─────────────────────────┴─────────────────────────┘ -``` - -## Voir Aussi {#see-also} - -- [Fonctions de conversion de Type](../../sql-reference/functions/type-conversion-functions.md) -- [Fonctions pour travailler avec des dates et des heures](../../sql-reference/functions/date-time-functions.md) -- [Fonctions pour travailler avec des tableaux](../../sql-reference/functions/array-functions.md) -- [Le `date_time_input_format` paramètre](../../operations/settings/settings.md#settings-date_time_input_format) -- [Le `timezone` paramètre de configuration du serveur](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) -- [Opérateurs pour travailler avec des dates et des heures](../../sql-reference/operators/index.md#operators-datetime) -- [`Date` type de données](date.md) -- [`DateTime` type de données](datetime.md) diff --git a/docs/fr/sql-reference/data-types/decimal.md b/docs/fr/sql-reference/data-types/decimal.md deleted file mode 100644 index 171bc1cf6dd..00000000000 --- a/docs/fr/sql-reference/data-types/decimal.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 
42 -toc_title: "D\xE9cimal" ---- - -# Décimal (P, S), Décimal32 (S), Décimal64 (S), Décimal128 (S) {#decimalp-s-decimal32s-decimal64s-decimal128s} - -Nombres à points fixes signés qui conservent la précision pendant les opérations d'addition, de soustraction et de multiplication. Pour la division, les chiffres les moins significatifs sont ignorés (non arrondis). - -## Paramètre {#parameters} - -- P-précision. Plage valide: \[1: 38 \]. Détermine le nombre de chiffres décimaux nombre peut avoir (fraction y compris). -- S - échelle. Plage valide: \[0: P \]. Détermine le nombre de chiffres décimaux fraction peut avoir. - -En fonction de P Paramètre Valeur décimal (P, S) est un synonyme de: -- P à partir de \[ 1: 9\] - Pour Décimal32 (S) -- P à partir de \[10: 18\] - pour Décimal64 (S) -- P à partir de \[19: 38\] - pour Décimal128 (S) - -## Plages De Valeurs Décimales {#decimal-value-ranges} - -- Décimal32 (S) - ( -1 \* 10^(9 - S), 1 \* 10^(9-S) ) -- Décimal64 (S) - ( -1 \* 10^(18 - S), 1 \* 10^(18-S) ) -- Décimal128 (S) - ( -1 \* 10^(38 - S), 1 \* 10^(38-S) ) - -Par exemple, Decimal32(4) peut contenir des nombres de -99999.9999 à 99999.9999 avec 0,0001 étape. - -## Représentation Interne {#internal-representation} - -En interne, les données sont représentées comme des entiers signés normaux avec une largeur de bit respective. Les plages de valeurs réelles qui peuvent être stockées en mémoire sont un peu plus grandes que celles spécifiées ci-dessus, qui sont vérifiées uniquement lors de la conversion à partir d'une chaîne. - -Parce que les processeurs modernes ne prennent pas en charge les entiers 128 bits nativement, les opérations sur Decimal128 sont émulées. Pour cette raison, Decimal128 fonctionne significativement plus lentement que Decimal32 / Decimal64. - -## Opérations et type de résultat {#operations-and-result-type} - -Les opérations binaires sur le résultat décimal dans le type de résultat plus large (avec n'importe quel ordre d'arguments). - -- `Decimal64(S1) Decimal32(S2) -> Decimal64(S)` -- `Decimal128(S1) Decimal32(S2) -> Decimal128(S)` -- `Decimal128(S1) Decimal64(S2) -> Decimal128(S)` - -Règles pour l'échelle: - -- ajouter, soustraire: S = max (S1, S2). -- multuply: S = S1 + S2. -- diviser: S = S1. - -Pour des opérations similaires entre décimal et entier, le résultat est Décimal de la même taille qu'un argument. - -Les opérations entre Decimal et Float32 / Float64 ne sont pas définies. Si vous en avez besoin, vous pouvez explicitement lancer l'un des arguments en utilisant les builtins toDecimal32, toDecimal64, toDecimal128 ou toFloat32, toFloat64. Gardez à l'esprit que le résultat perdra de la précision et que la conversion de type est une opération coûteuse en calcul. - -Certaines fonctions sur le résultat de retour décimal comme Float64 (par exemple, var ou stddev). Les calculs intermédiaires peuvent toujours être effectués en décimal, ce qui peut conduire à des résultats différents entre les entrées Float64 et Decimal avec les mêmes valeurs. - -## Contrôles De Débordement {#overflow-checks} - -Pendant les calculs sur Décimal, des débordements entiers peuvent se produire. Les chiffres excessifs dans une fraction sont éliminés (non arrondis). Les chiffres excessifs dans la partie entière conduiront à une exception. 
- -``` sql -SELECT toDecimal32(2, 4) AS x, x / 3 -``` - -``` text -┌──────x─┬─divide(toDecimal32(2, 4), 3)─┐ -│ 2.0000 │ 0.6666 │ -└────────┴──────────────────────────────┘ -``` - -``` sql -SELECT toDecimal32(4.2, 8) AS x, x * x -``` - -``` text -DB::Exception: Scale is out of bounds. -``` - -``` sql -SELECT toDecimal32(4.2, 8) AS x, 6 * x -``` - -``` text -DB::Exception: Decimal math overflow. -``` - -Les contrôles de débordement entraînent un ralentissement des opérations. S'il est connu que les débordements ne sont pas possibles, il est logique de désactiver les contrôles en utilisant `decimal_check_overflow` paramètre. Lorsque des contrôles sont désactivés et le débordement se produit, le résultat sera faux: - -``` sql -SET decimal_check_overflow = 0; -SELECT toDecimal32(4.2, 8) AS x, 6 * x -``` - -``` text -┌──────────x─┬─multiply(6, toDecimal32(4.2, 8))─┐ -│ 4.20000000 │ -17.74967296 │ -└────────────┴──────────────────────────────────┘ -``` - -Les contrôles de débordement se produisent non seulement sur les opérations arithmétiques mais aussi sur la comparaison de valeurs: - -``` sql -SELECT toDecimal32(1, 8) < 100 -``` - -``` text -DB::Exception: Can't compare. -``` - -[Article Original](https://clickhouse.tech/docs/en/data_types/decimal/) diff --git a/docs/fr/sql-reference/data-types/domains/index.md b/docs/fr/sql-reference/data-types/domains/index.md deleted file mode 100644 index 7e11f9a8a68..00000000000 --- a/docs/fr/sql-reference/data-types/domains/index.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_folder_title: Domaine -toc_priority: 56 -toc_title: "Aper\xE7u" ---- - -# Domaine {#domains} - -Les domaines sont des types spéciaux qui ajoutent des fonctionnalités supplémentaires au sommet du type de base existant, mais en laissant le format on-wire et on-disc du type de données sous-jacent intact. À l'heure actuelle, ClickHouse ne prend pas en charge les domaines définis par l'utilisateur. - -Vous pouvez utiliser des domaines partout type de base correspondant peut être utilisé, par exemple: - -- Créer une colonne d'un type de domaine -- Valeurs de lecture / écriture depuis / vers la colonne de domaine -- L'utiliser comme un indice si un type de base peut être utilisée comme un indice -- Fonctions d'appel avec des valeurs de colonne de domaine - -### Fonctionnalités supplémentaires des domaines {#extra-features-of-domains} - -- Nom de type de colonne explicite dans `SHOW CREATE TABLE` ou `DESCRIBE TABLE` -- Entrée du format convivial avec `INSERT INTO domain_table(domain_column) VALUES(...)` -- Sortie au format convivial pour `SELECT domain_column FROM domain_table` -- Chargement de données à partir d'une source externe dans un format convivial: `INSERT INTO domain_table FORMAT CSV ...` - -### Limitation {#limitations} - -- Impossible de convertir la colonne d'index du type de base en type de domaine via `ALTER TABLE`. -- Impossible de convertir implicitement des valeurs de chaîne en valeurs de domaine lors de l'insertion de données d'une autre colonne ou table. -- Le domaine n'ajoute aucune contrainte sur les valeurs stockées. 
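The last limitation can be illustrated with a short, hypothetical sketch (table names `src` and `dst` are invented here): when inserting from a plain `String` column, the conversion to the domain type has to be spelled out, for example via `toIPv4` for the IPv4 domain described just below.

``` sql
CREATE TABLE src (s String) ENGINE = TinyLog;
INSERT INTO src VALUES ('116.253.40.133');

CREATE TABLE dst (ip IPv4) ENGINE = TinyLog;

-- No implicit cast is performed from the String column; convert explicitly.
INSERT INTO dst SELECT toIPv4(s) FROM src;
```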
- -[Article Original](https://clickhouse.tech/docs/en/data_types/domains/overview) diff --git a/docs/fr/sql-reference/data-types/domains/ipv4.md b/docs/fr/sql-reference/data-types/domains/ipv4.md deleted file mode 100644 index 12895992e77..00000000000 --- a/docs/fr/sql-reference/data-types/domains/ipv4.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 59 -toc_title: IPv4 ---- - -## IPv4 {#ipv4} - -`IPv4` est un domaine basé sur `UInt32` tapez et sert de remplacement typé pour stocker des valeurs IPv4. Il fournit un stockage compact avec le format d'entrée-sortie convivial et les informations de type de colonne sur l'inspection. - -### Utilisation De Base {#basic-usage} - -``` sql -CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY url; - -DESCRIBE TABLE hits; -``` - -``` text -┌─name─┬─type───┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┐ -│ url │ String │ │ │ │ │ -│ from │ IPv4 │ │ │ │ │ -└──────┴────────┴──────────────┴────────────────────┴─────────┴──────────────────┘ -``` - -Ou vous pouvez utiliser le domaine IPv4 comme clé: - -``` sql -CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from; -``` - -`IPv4` le domaine prend en charge le format d'entrée personnalisé en tant que chaînes IPv4: - -``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.tech/docs/en/', '116.106.34.242'); - -SELECT * FROM hits; -``` - -``` text -┌─url────────────────────────────────┬───────────from─┐ -│ https://clickhouse.tech/docs/en/ │ 116.106.34.242 │ -│ https://wikipedia.org │ 116.253.40.133 │ -│ https://clickhouse.tech │ 183.247.232.58 │ -└────────────────────────────────────┴────────────────┘ -``` - -Les valeurs sont stockées sous forme binaire compacte: - -``` sql -SELECT toTypeName(from), hex(from) FROM hits LIMIT 1; -``` - -``` text -┌─toTypeName(from)─┬─hex(from)─┐ -│ IPv4 │ B7F7E83A │ -└──────────────────┴───────────┘ -``` - -Les valeurs de domaine ne sont pas implicitement convertibles en types autres que `UInt32`. -Si vous voulez convertir `IPv4` valeur à une chaîne, vous devez le faire explicitement avec `IPv4NumToString()` fonction: - -``` sql -SELECT toTypeName(s), IPv4NumToString(from) as s FROM hits LIMIT 1; -``` - - ┌─toTypeName(IPv4NumToString(from))─┬─s──────────────┐ - │ String │ 183.247.232.58 │ - └───────────────────────────────────┴────────────────┘ - -Ou coulé à un `UInt32` valeur: - -``` sql -SELECT toTypeName(i), CAST(from as UInt32) as i FROM hits LIMIT 1; -``` - -``` text -┌─toTypeName(CAST(from, 'UInt32'))─┬──────────i─┐ -│ UInt32 │ 3086477370 │ -└──────────────────────────────────┴────────────┘ -``` - -[Article Original](https://clickhouse.tech/docs/en/data_types/domains/ipv4) diff --git a/docs/fr/sql-reference/data-types/domains/ipv6.md b/docs/fr/sql-reference/data-types/domains/ipv6.md deleted file mode 100644 index 77510a950cb..00000000000 --- a/docs/fr/sql-reference/data-types/domains/ipv6.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 60 -toc_title: IPv6 ---- - -## IPv6 {#ipv6} - -`IPv6` est un domaine basé sur `FixedString(16)` tapez et sert de remplacement typé pour stocker des valeurs IPv6. Il fournit un stockage compact avec le format d'entrée-sortie convivial et les informations de type de colonne sur l'inspection. 
- -### Utilisation De Base {#basic-usage} - -``` sql -CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY url; - -DESCRIBE TABLE hits; -``` - -``` text -┌─name─┬─type───┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┐ -│ url │ String │ │ │ │ │ -│ from │ IPv6 │ │ │ │ │ -└──────┴────────┴──────────────┴────────────────────┴─────────┴──────────────────┘ -``` - -Ou vous pouvez utiliser `IPv6` domaine comme l'un des principaux: - -``` sql -CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from; -``` - -`IPv6` le domaine prend en charge l'entrée personnalisée en tant que chaînes IPv6: - -``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.tech/docs/en/', '2a02:e980:1e::1'); - -SELECT * FROM hits; -``` - -``` text -┌─url────────────────────────────────┬─from──────────────────────────┐ -│ https://clickhouse.tech │ 2001:44c8:129:2632:33:0:252:2 │ -│ https://clickhouse.tech/docs/en/ │ 2a02:e980:1e::1 │ -│ https://wikipedia.org │ 2a02:aa08:e000:3100::2 │ -└────────────────────────────────────┴───────────────────────────────┘ -``` - -Les valeurs sont stockées sous forme binaire compacte: - -``` sql -SELECT toTypeName(from), hex(from) FROM hits LIMIT 1; -``` - -``` text -┌─toTypeName(from)─┬─hex(from)────────────────────────┐ -│ IPv6 │ 200144C8012926320033000002520002 │ -└──────────────────┴──────────────────────────────────┘ -``` - -Les valeurs de domaine ne sont pas implicitement convertibles en types autres que `FixedString(16)`. -Si vous voulez convertir `IPv6` valeur à une chaîne, vous devez le faire explicitement avec `IPv6NumToString()` fonction: - -``` sql -SELECT toTypeName(s), IPv6NumToString(from) as s FROM hits LIMIT 1; -``` - -``` text -┌─toTypeName(IPv6NumToString(from))─┬─s─────────────────────────────┐ -│ String │ 2001:44c8:129:2632:33:0:252:2 │ -└───────────────────────────────────┴───────────────────────────────┘ -``` - -Ou coulé à un `FixedString(16)` valeur: - -``` sql -SELECT toTypeName(i), CAST(from as FixedString(16)) as i FROM hits LIMIT 1; -``` - -``` text -┌─toTypeName(CAST(from, 'FixedString(16)'))─┬─i───────┐ -│ FixedString(16) │ ��� │ -└───────────────────────────────────────────┴─────────┘ -``` - -[Article Original](https://clickhouse.tech/docs/en/data_types/domains/ipv6) diff --git a/docs/fr/sql-reference/data-types/enum.md b/docs/fr/sql-reference/data-types/enum.md deleted file mode 100644 index b9751c1c804..00000000000 --- a/docs/fr/sql-reference/data-types/enum.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 50 -toc_title: Enum ---- - -# Enum {#enum} - -Type énuméré composé de valeurs nommées. - -Les valeurs nommées doivent être déclarées comme `'string' = integer` pair. ClickHouse ne stocke que des nombres, mais prend en charge les opérations avec les valeurs à travers leurs noms. - -Supports ClickHouse: - -- 8-bit `Enum`. Il peut contenir jusqu'à 256 valeurs énumérées dans le `[-128, 127]` gamme. -- 16 bits `Enum`. Il peut contenir jusqu'à 65 536 valeurs énumérées dans le `[-32768, 32767]` gamme. - -Clickhouse choisit automatiquement le type de `Enum` lorsque les données sont insérées. Vous pouvez également utiliser `Enum8` ou `Enum16` types pour être sûr de la taille de stockage. 
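For instance, a sketch of pinning the storage size explicitly instead of relying on automatic selection (the table name is hypothetical):

``` sql
-- Declaring Enum16 makes the two-byte storage explicit; values such as 1000
-- would not fit the Enum8 range [-128, 127] anyway.
CREATE TABLE t_enum16
(
    x Enum16('rare' = 1, 'common' = 1000)
) ENGINE = TinyLog
```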
- -## Exemples D'Utilisation {#usage-examples} - -Ici, nous créons une table avec une `Enum8('hello' = 1, 'world' = 2)` type de colonne: - -``` sql -CREATE TABLE t_enum -( - x Enum('hello' = 1, 'world' = 2) -) -ENGINE = TinyLog -``` - -Colonne `x` ne peut stocker que les valeurs répertoriées dans la définition de type: `'hello'` ou `'world'`. Si vous essayez d'enregistrer une autre valeur, ClickHouse déclenchera une exception. Taille 8 bits pour cela `Enum` est choisi automatiquement. - -``` sql -INSERT INTO t_enum VALUES ('hello'), ('world'), ('hello') -``` - -``` text -Ok. -``` - -``` sql -INSERT INTO t_enum values('a') -``` - -``` text -Exception on client: -Code: 49. DB::Exception: Unknown element 'a' for type Enum('hello' = 1, 'world' = 2) -``` - -Lorsque vous interrogez des données de la table, ClickHouse affiche les valeurs de chaîne de `Enum`. - -``` sql -SELECT * FROM t_enum -``` - -``` text -┌─x─────┐ -│ hello │ -│ world │ -│ hello │ -└───────┘ -``` - -Si vous avez besoin de voir les équivalents numériques des lignes, vous devez `Enum` valeur en type entier. - -``` sql -SELECT CAST(x, 'Int8') FROM t_enum -``` - -``` text -┌─CAST(x, 'Int8')─┐ -│ 1 │ -│ 2 │ -│ 1 │ -└─────────────────┘ -``` - -Pour créer une valeur d'Enum dans une requête, vous devez également utiliser `CAST`. - -``` sql -SELECT toTypeName(CAST('a', 'Enum(\'a\' = 1, \'b\' = 2)')) -``` - -``` text -┌─toTypeName(CAST('a', 'Enum(\'a\' = 1, \'b\' = 2)'))─┐ -│ Enum8('a' = 1, 'b' = 2) │ -└─────────────────────────────────────────────────────┘ -``` - -## Règles générales et utilisation {#general-rules-and-usage} - -Chacune des valeurs se voit attribuer un nombre dans la plage `-128 ... 127` pour `Enum8` ou dans la gamme `-32768 ... 32767` pour `Enum16`. Toutes les chaînes et les nombres doivent être différents. Une chaîne vide est autorisé. Si ce type est spécifié (dans une définition de table), les nombres peuvent être dans un ordre arbitraire. Toutefois, l'ordre n'a pas d'importance. - -Ni la chaîne ni la valeur numérique dans un `Enum` peut être [NULL](../../sql-reference/syntax.md). - -Un `Enum` peut être contenue dans [Nullable](nullable.md) type. Donc, si vous créez une table en utilisant la requête - -``` sql -CREATE TABLE t_enum_nullable -( - x Nullable( Enum8('hello' = 1, 'world' = 2) ) -) -ENGINE = TinyLog -``` - -il peut stocker non seulement des `'hello'` et `'world'`, mais `NULL`, ainsi. - -``` sql -INSERT INTO t_enum_nullable Values('hello'),('world'),(NULL) -``` - -Dans la mémoire RAM, un `Enum` la colonne est stockée dans la même manière que `Int8` ou `Int16` des valeurs numériques correspondantes. - -Lors de la lecture sous forme de texte, ClickHouse analyse la valeur sous forme de chaîne et recherche la chaîne correspondante à partir de l'ensemble des valeurs Enum. Si elle n'est pas trouvée, une exception est levée. Lors de la lecture au format texte, la chaîne est lue et la valeur numérique correspondante est recherchée. Une exception sera levée si il n'est pas trouvé. -Lors de l'écriture sous forme de texte, il écrit la valeur correspondante de la chaîne. Si les données de colonne contiennent des déchets (nombres qui ne proviennent pas de l'ensemble valide), une exception est levée. Lors de la lecture et de l'écriture sous forme binaire, cela fonctionne de la même manière que pour les types de données Int8 et Int16. -La valeur implicite par défaut est la valeur avec le numéro le plus bas. 
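A short sketch of that last rule (the table name is hypothetical): when the enum column is omitted on insert, it falls back to the value with the lowest number.

``` sql
CREATE TABLE t_enum_default
(
    id UInt8,
    x Enum8('hello' = 1, 'world' = 2)
) ENGINE = TinyLog;

-- x is not provided, so it takes the implicit default: 'hello' (number 1).
INSERT INTO t_enum_default (id) VALUES (42);

SELECT * FROM t_enum_default;
```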
- -Lors `ORDER BY`, `GROUP BY`, `IN`, `DISTINCT` et ainsi de suite, les Énumérations se comportent de la même façon que les nombres correspondants. Par exemple, ORDER BY les trie numériquement. Les opérateurs d'égalité et de comparaison fonctionnent de la même manière sur les énumérations que sur les valeurs numériques sous-jacentes. - -Les valeurs Enum ne peuvent pas être comparées aux nombres. Les Enums peuvent être comparés à une chaîne constante. Si la chaîne comparée à n'est pas une valeur valide pour L'énumération, une exception sera levée. L'opérateur est pris en charge avec l'Enum sur le côté gauche, et un ensemble de chaînes sur le côté droit. Les chaînes sont les valeurs de L'énumération correspondante. - -Most numeric and string operations are not defined for Enum values, e.g. adding a number to an Enum or concatenating a string to an Enum. -Cependant, L'énumération a un naturel `toString` fonction qui renvoie sa valeur de chaîne. - -Les valeurs Enum sont également convertibles en types numériques en utilisant `toT` fonction, où T est un type numérique. Lorsque T correspond au type numérique sous-jacent de l'énumération, cette conversion est à coût nul. -Le type Enum peut être modifié sans coût en utilisant ALTER, si seulement l'ensemble des valeurs est modifié. Il est possible d'ajouter et de supprimer des membres de L'énumération en utilisant ALTER (la suppression n'est sûre que si la valeur supprimée n'a jamais été utilisée dans la table). À titre de sauvegarde, la modification de la valeur numérique d'un membre Enum précédemment défini lancera une exception. - -En utilisant ALTER, il est possible de changer un Enum8 en Enum16 ou vice versa, tout comme changer un Int8 en Int16. - -[Article Original](https://clickhouse.tech/docs/en/data_types/enum/) diff --git a/docs/fr/sql-reference/data-types/fixedstring.md b/docs/fr/sql-reference/data-types/fixedstring.md deleted file mode 100644 index 5ba09187581..00000000000 --- a/docs/fr/sql-reference/data-types/fixedstring.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 45 -toc_title: FixedString (N) ---- - -# Fixedstring {#fixedstring} - -Une chaîne de longueur fixe de `N` octets (ni caractères ni points de code). - -Pour déclarer une colonne de `FixedString` tapez, utilisez la syntaxe suivante: - -``` sql - FixedString(N) -``` - -Où `N` est un nombre naturel. - -Le `FixedString` type est efficace lorsque les données ont la longueur de précisément `N` octet. Dans tous les autres cas, il est susceptible de réduire l'efficacité. - -Exemples de valeurs qui peuvent être stockées efficacement dans `FixedString`-tapé colonnes: - -- La représentation binaire des adresses IP (`FixedString(16)` pour IPv6). -- Language codes (ru_RU, en_US … ). -- Currency codes (USD, RUB … ). -- Représentation binaire des hachages (`FixedString(16)` pour MD5, `FixedString(32)` pour SHA256). - -Pour stocker les valeurs UUID, utilisez [UUID](uuid.md) type de données. - -Lors de l'insertion des données, ClickHouse: - -- Complète une chaîne avec des octets null si la chaîne contient moins de `N` octet. -- Jette le `Too large value for FixedString(N)` exception si la chaîne contient plus de `N` octet. - -Lors de la sélection des données, ClickHouse ne supprime pas les octets nuls à la fin de la chaîne. Si vous utilisez le `WHERE` clause, vous devez ajouter des octets null manuellement pour `FixedString` valeur. 
L'exemple suivant illustre l'utilisation de l' `WHERE` la clause de `FixedString`. - -Considérons le tableau suivant avec le seul `FixedString(2)` colonne: - -``` text -┌─name──┐ -│ b │ -└───────┘ -``` - -Requête `SELECT * FROM FixedStringTable WHERE a = 'b'` ne renvoie aucune donnée en conséquence. Nous devrions compléter le modèle de filtre avec des octets nuls. - -``` sql -SELECT * FROM FixedStringTable -WHERE a = 'b\0' -``` - -``` text -┌─a─┐ -│ b │ -└───┘ -``` - -Ce comportement diffère de MySQL pour le `CHAR` type (où les chaînes sont remplies d'espaces et les espaces sont supprimés pour la sortie). - -À noter que la longueur de la `FixedString(N)` la valeur est constante. Le [longueur](../../sql-reference/functions/array-functions.md#array_functions-length) la fonction renvoie `N` même si l' `FixedString(N)` la valeur est remplie uniquement avec des octets [vide](../../sql-reference/functions/string-functions.md#empty) la fonction renvoie `1` dans ce cas. - -[Article Original](https://clickhouse.tech/docs/en/data_types/fixedstring/) diff --git a/docs/fr/sql-reference/data-types/float.md b/docs/fr/sql-reference/data-types/float.md deleted file mode 100644 index b269b930110..00000000000 --- a/docs/fr/sql-reference/data-types/float.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 41 -toc_title: Float32, Float64 ---- - -# Float32, Float64 {#float32-float64} - -[Les nombres à virgule flottante](https://en.wikipedia.org/wiki/IEEE_754). - -Les Types sont équivalents aux types de C: - -- `Float32` - `float` -- `Float64` - `double` - -Nous vous recommandons de stocker les données sous forme entière chaque fois que possible. Par exemple, convertissez des nombres de précision fixes en valeurs entières, telles que des montants monétaires ou des temps de chargement de page en millisecondes. - -## Utilisation de nombres à virgule flottante {#using-floating-point-numbers} - -- Calculs avec des nombres à virgule flottante peut produire une erreur d'arrondi. - - - -``` sql -SELECT 1 - 0.9 -``` - -``` text -┌───────minus(1, 0.9)─┐ -│ 0.09999999999999998 │ -└─────────────────────┘ -``` - -- Le résultat du calcul dépend de la méthode de calcul (le type de processeur et de l'architecture du système informatique). -- Les calculs à virgule flottante peuvent entraîner des nombres tels que l'infini (`Inf`) et “not-a-number” (`NaN`). Cela doit être pris en compte lors du traitement des résultats de calculs. -- Lors de l'analyse de nombres à virgule flottante à partir de texte, le résultat peut ne pas être le nombre représentable par machine le plus proche. - -## NaN et Inf {#data_type-float-nan-inf} - -Contrairement à SQL standard, ClickHouse prend en charge les catégories suivantes de nombres à virgule flottante: - -- `Inf` – Infinity. - - - -``` sql -SELECT 0.5 / 0 -``` - -``` text -┌─divide(0.5, 0)─┐ -│ inf │ -└────────────────┘ -``` - -- `-Inf` – Negative infinity. - - - -``` sql -SELECT -0.5 / 0 -``` - -``` text -┌─divide(-0.5, 0)─┐ -│ -inf │ -└─────────────────┘ -``` - -- `NaN` – Not a number. - - - -``` sql -SELECT 0 / 0 -``` - -``` text -┌─divide(0, 0)─┐ -│ nan │ -└──────────────┘ -``` - - See the rules for `NaN` sorting in the section [ORDER BY clause](../sql_reference/statements/select/order-by.md). 
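When such special values can occur, it is often useful to test for them explicitly; a minimal sketch using the standard predicates:

``` sql
-- isNaN / isInfinite / isFinite distinguish the categories shown above.
SELECT
    isNaN(0 / 0) AS is_nan,          -- 1
    isInfinite(-0.5 / 0) AS is_inf,  -- 1
    isFinite(1 - 0.9) AS is_fin      -- 1
```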
- -[Article Original](https://clickhouse.tech/docs/en/data_types/float/) diff --git a/docs/fr/sql-reference/data-types/index.md b/docs/fr/sql-reference/data-types/index.md deleted file mode 100644 index 887e2efd69f..00000000000 --- a/docs/fr/sql-reference/data-types/index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_folder_title: "Types De Donn\xE9es" -toc_priority: 37 -toc_title: Introduction ---- - -# Types De Données {#data_types} - -ClickHouse peut stocker différents types de données dans des cellules de table. - -Cette section décrit les types de données pris en charge et les considérations spéciales pour les utiliser et/ou les implémenter le cas échéant. - -[Article Original](https://clickhouse.tech/docs/en/data_types/) diff --git a/docs/fr/sql-reference/data-types/int-uint.md b/docs/fr/sql-reference/data-types/int-uint.md deleted file mode 100644 index 9b196c164a4..00000000000 --- a/docs/fr/sql-reference/data-types/int-uint.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 40 -toc_title: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64 ---- - -# UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64 {#uint8-uint16-uint32-uint64-int8-int16-int32-int64} - -Entiers de longueur fixe, avec ou sans signe. - -## Plages Int {#int-ranges} - -- Int8 - \[-128: 127\] -- Int16 - \[-32768: 32767\] -- Int32 - \[-2147483648: 2147483647\] -- Int64 - \[-9223372036854775808: 9223372036854775807\] - -## Plages Uint {#uint-ranges} - -- UInt8 - \[0: 255\] -- UInt16 - \[0: 65535\] -- UInt32- \[0: 4294967295\] -- UInt64- \[0: 18446744073709551615\] - -[Article Original](https://clickhouse.tech/docs/en/data_types/int_uint/) diff --git a/docs/fr/sql-reference/data-types/nested-data-structures/index.md b/docs/fr/sql-reference/data-types/nested-data-structures/index.md deleted file mode 100644 index 528e0bad0cd..00000000000 --- a/docs/fr/sql-reference/data-types/nested-data-structures/index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_folder_title: "Structures De Donn\xE9es Imbriqu\xE9es" -toc_hidden: true -toc_priority: 54 -toc_title: "cach\xE9s" ---- - -# Structures De Données Imbriquées {#nested-data-structures} - -[Article Original](https://clickhouse.tech/docs/en/data_types/nested_data_structures/) diff --git a/docs/fr/sql-reference/data-types/nested-data-structures/nested.md b/docs/fr/sql-reference/data-types/nested-data-structures/nested.md deleted file mode 100644 index 2805780de24..00000000000 --- a/docs/fr/sql-reference/data-types/nested-data-structures/nested.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 57 -toc_title: "Imbriqu\xE9e(Type1 Nom1, Nom2 Type2, ...)" ---- - -# Nested(name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2} - -A nested data structure is like a table inside a cell. The parameters of a nested data structure – the column names and types – are specified the same way as in a [CREATE TABLE](../../../sql-reference/statements/create.md) requête. Chaque ligne de table peut correspondre à n'importe quel nombre de lignes dans une structure de données imbriquée. 
- -Exemple: - -``` sql -CREATE TABLE test.visits -( - CounterID UInt32, - StartDate Date, - Sign Int8, - IsNew UInt8, - VisitID UInt64, - UserID UInt64, - ... - Goals Nested - ( - ID UInt32, - Serial UInt32, - EventTime DateTime, - Price Int64, - OrderID String, - CurrencyID UInt32 - ), - ... -) ENGINE = CollapsingMergeTree(StartDate, intHash32(UserID), (CounterID, StartDate, intHash32(UserID), VisitID), 8192, Sign) -``` - -Cet exemple déclare le `Goals` structure de données imbriquée, qui contient des données sur les conversions (objectifs atteints). Chaque ligne de la ‘visits’ table peut correspondre à zéro ou n'importe quel nombre de conversions. - -Un seul niveau d'imbrication est pris en charge. Les colonnes de structures imbriquées contenant des tableaux sont équivalentes à des tableaux multidimensionnels, elles ont donc un support limité (il n'y a pas de support pour stocker ces colonnes dans des tables avec le moteur MergeTree). - -Dans la plupart des cas, lorsque vous travaillez avec une structure de données imbriquée, ses colonnes sont spécifiées avec des noms de colonnes séparés par un point. Ces colonnes constituent un tableau de types correspondants. Tous les tableaux de colonnes d'une structure de données imbriquée unique ont la même longueur. - -Exemple: - -``` sql -SELECT - Goals.ID, - Goals.EventTime -FROM test.visits -WHERE CounterID = 101500 AND length(Goals.ID) < 5 -LIMIT 10 -``` - -``` text -┌─Goals.ID───────────────────────┬─Goals.EventTime───────────────────────────────────────────────────────────────────────────┐ -│ [1073752,591325,591325] │ ['2014-03-17 16:38:10','2014-03-17 16:38:48','2014-03-17 16:42:27'] │ -│ [1073752] │ ['2014-03-17 00:28:25'] │ -│ [1073752] │ ['2014-03-17 10:46:20'] │ -│ [1073752,591325,591325,591325] │ ['2014-03-17 13:59:20','2014-03-17 22:17:55','2014-03-17 22:18:07','2014-03-17 22:18:51'] │ -│ [] │ [] │ -│ [1073752,591325,591325] │ ['2014-03-17 11:37:06','2014-03-17 14:07:47','2014-03-17 14:36:21'] │ -│ [] │ [] │ -│ [] │ [] │ -│ [591325,1073752] │ ['2014-03-17 00:46:05','2014-03-17 00:46:05'] │ -│ [1073752,591325,591325,591325] │ ['2014-03-17 13:28:33','2014-03-17 13:30:26','2014-03-17 18:51:21','2014-03-17 18:51:45'] │ -└────────────────────────────────┴───────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -Il est plus facile de penser à une structure de données imbriquée comme un ensemble de plusieurs tableaux de colonnes de la même longueur. - -Le seul endroit où une requête SELECT peut spécifier le nom d'une structure de données imbriquée entière au lieu de colonnes individuelles est la clause de jointure de tableau. Pour plus d'informations, voir “ARRAY JOIN clause”. Exemple: - -``` sql -SELECT - Goal.ID, - Goal.EventTime -FROM test.visits -ARRAY JOIN Goals AS Goal -WHERE CounterID = 101500 AND length(Goals.ID) < 5 -LIMIT 10 -``` - -``` text -┌─Goal.ID─┬──────Goal.EventTime─┐ -│ 1073752 │ 2014-03-17 16:38:10 │ -│ 591325 │ 2014-03-17 16:38:48 │ -│ 591325 │ 2014-03-17 16:42:27 │ -│ 1073752 │ 2014-03-17 00:28:25 │ -│ 1073752 │ 2014-03-17 10:46:20 │ -│ 1073752 │ 2014-03-17 13:59:20 │ -│ 591325 │ 2014-03-17 22:17:55 │ -│ 591325 │ 2014-03-17 22:18:07 │ -│ 591325 │ 2014-03-17 22:18:51 │ -│ 1073752 │ 2014-03-17 11:37:06 │ -└─────────┴─────────────────────┘ -``` - -Vous ne pouvez pas effectuer SELECT pour une structure de données imbriquée entière. Vous ne pouvez lister explicitement que les colonnes individuelles qui en font partie. 
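A compact, self-contained sketch (with a hypothetical table) of both this rule and the INSERT rule described next, where each component column receives its own array and the lengths must match:

``` sql
CREATE TABLE nested_demo
(
    id UInt32,
    g Nested(k UInt32, v String)
) ENGINE = TinyLog;

-- One array per component column; the system checks the lengths are equal.
INSERT INTO nested_demo VALUES (1, [10, 20], ['a', 'b']);

-- The whole structure cannot be selected; list the component columns instead.
SELECT g.k, g.v FROM nested_demo;
```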
- -Pour une requête INSERT, vous devez passer tous les tableaux de colonnes composant d'une structure de données imbriquée séparément (comme s'il s'agissait de tableaux de colonnes individuels). Au cours de l'insertion, le système vérifie qu'ils ont la même longueur. - -Pour une requête DESCRIBE, les colonnes d'une structure de données imbriquée sont répertoriées séparément de la même manière. - -La requête ALTER pour les éléments d'une structure de données imbriquée a des limites. - -[Article Original](https://clickhouse.tech/docs/en/data_types/nested_data_structures/nested/) diff --git a/docs/fr/sql-reference/data-types/nullable.md b/docs/fr/sql-reference/data-types/nullable.md deleted file mode 100644 index 6b37b571a96..00000000000 --- a/docs/fr/sql-reference/data-types/nullable.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 54 -toc_title: Nullable ---- - -# Nullable(typename) {#data_type-nullable} - -Permet de stocker marqueur spécial ([NULL](../../sql-reference/syntax.md)) qui dénote “missing value” aux valeurs normales autorisées par `TypeName`. Par exemple, un `Nullable(Int8)` type colonne peut stocker `Int8` type de valeurs, et les lignes qui n'ont pas de valeur magasin `NULL`. - -Pour un `TypeName` vous ne pouvez pas utiliser les types de données composites [Tableau](array.md) et [Tuple](tuple.md). Les types de données composites peuvent contenir `Nullable` valeurs de type, telles que `Array(Nullable(Int8))`. - -A `Nullable` le champ type ne peut pas être inclus dans les index de table. - -`NULL` est la valeur par défaut pour tout `Nullable` type, sauf indication contraire dans la configuration du serveur ClickHouse. - -## Caractéristiques De Stockage {#storage-features} - -Stocker `Nullable` valeurs de type dans une colonne de table, ClickHouse utilise un fichier séparé avec `NULL` masques en plus du fichier normal avec des valeurs. Les entrées du fichier masks permettent à ClickHouse de faire la distinction entre `NULL` et une valeur par défaut du type de données correspondant pour chaque ligne de table. En raison d'un fichier supplémentaire, `Nullable` colonne consomme de l'espace de stockage supplémentaire par rapport à une normale similaire. - -!!! info "Note" - Utiliser `Nullable` affecte presque toujours négativement les performances, gardez cela à l'esprit lors de la conception de vos bases de données. - -## Exemple D'Utilisation {#usage-example} - -``` sql -CREATE TABLE t_null(x Int8, y Nullable(Int8)) ENGINE TinyLog -``` - -``` sql -INSERT INTO t_null VALUES (1, NULL), (2, 3) -``` - -``` sql -SELECT x + y FROM t_null -``` - -``` text -┌─plus(x, y)─┐ -│ ᴺᵁᴸᴸ │ -│ 5 │ -└────────────┘ -``` - -[Article Original](https://clickhouse.tech/docs/en/data_types/nullable/) diff --git a/docs/fr/sql-reference/data-types/simpleaggregatefunction.md b/docs/fr/sql-reference/data-types/simpleaggregatefunction.md deleted file mode 100644 index 81fcd67cfae..00000000000 --- a/docs/fr/sql-reference/data-types/simpleaggregatefunction.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd ---- - -# SimpleAggregateFunction {#data-type-simpleaggregatefunction} - -`SimpleAggregateFunction(name, types_of_arguments…)` le type de données stocke la valeur actuelle de la fonction d'agrégat et ne stocke pas son état complet comme [`AggregateFunction`](aggregatefunction.md) faire. 
Cette optimisation peut être appliquée aux fonctions pour lesquelles la propriété suivante est conservée: le résultat de l'application d'une fonction `f` pour un ensemble de lignes `S1 UNION ALL S2` peut être obtenu en appliquant `f` pour les parties de la ligne définie séparément, puis à nouveau l'application `f` pour les résultats: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. Cette propriété garantit que les résultats d'agrégation partielle sont suffisants pour calculer le combiné, de sorte que nous n'avons pas à stocker et traiter de données supplémentaires. - -Les fonctions d'agrégation suivantes sont prises en charge: - -- [`any`](../../sql-reference/aggregate-functions/reference.md#agg_function-any) -- [`anyLast`](../../sql-reference/aggregate-functions/reference.md#anylastx) -- [`min`](../../sql-reference/aggregate-functions/reference.md#agg_function-min) -- [`max`](../../sql-reference/aggregate-functions/reference.md#agg_function-max) -- [`sum`](../../sql-reference/aggregate-functions/reference.md#agg_function-sum) -- [`groupBitAnd`](../../sql-reference/aggregate-functions/reference.md#groupbitand) -- [`groupBitOr`](../../sql-reference/aggregate-functions/reference.md#groupbitor) -- [`groupBitXor`](../../sql-reference/aggregate-functions/reference.md#groupbitxor) - -Les valeurs de la `SimpleAggregateFunction(func, Type)` regarder et stockées de la même manière que `Type`, de sorte que vous n'avez pas besoin d'appliquer des fonctions avec `-Merge`/`-State` suffixe. `SimpleAggregateFunction` a de meilleures performances que `AggregateFunction` avec la même fonction d'agrégation. - -**Paramètre** - -- Nom de la fonction d'agrégation. -- Types des arguments de la fonction d'agrégation. - -**Exemple** - -``` sql -CREATE TABLE t -( - column1 SimpleAggregateFunction(sum, UInt64), - column2 SimpleAggregateFunction(any, String) -) ENGINE = ... -``` - -[Article Original](https://clickhouse.tech/docs/en/data_types/simpleaggregatefunction/) diff --git a/docs/fr/sql-reference/data-types/special-data-types/expression.md b/docs/fr/sql-reference/data-types/special-data-types/expression.md deleted file mode 100644 index c3ba5e42ba1..00000000000 --- a/docs/fr/sql-reference/data-types/special-data-types/expression.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 58 -toc_title: Expression ---- - -# Expression {#expression} - -Les Expressions sont utilisées pour représenter des lambdas dans des fonctions d'ordre Élevé. - -[Article Original](https://clickhouse.tech/docs/en/data_types/special_data_types/expression/) diff --git a/docs/fr/sql-reference/data-types/special-data-types/index.md b/docs/fr/sql-reference/data-types/special-data-types/index.md deleted file mode 100644 index 6d292dc522e..00000000000 --- a/docs/fr/sql-reference/data-types/special-data-types/index.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_folder_title: "Types De Donn\xE9es Sp\xE9ciaux" -toc_hidden: true -toc_priority: 55 -toc_title: "cach\xE9s" ---- - -# Types De Données Spéciaux {#special-data-types} - -Les valeurs de type de données spéciales ne peuvent pas être sérialisées pour l'enregistrement dans une table ou la sortie dans les résultats de la requête, mais peuvent être utilisées comme résultat intermédiaire lors de l'exécution de la requête. 
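As a hedged illustration: the lambda passed to a higher-order function is carried by the special `Expression` type mentioned above; it exists only while the query runs and is never written to a table.

``` sql
-- The lambda x -> x * 2 is an intermediate Expression value.
SELECT arrayMap(x -> x * 2, [1, 2, 3]) AS doubled
```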
- -[Article Original](https://clickhouse.tech/docs/en/data_types/special_data_types/) diff --git a/docs/fr/sql-reference/data-types/special-data-types/interval.md b/docs/fr/sql-reference/data-types/special-data-types/interval.md deleted file mode 100644 index 464de8a10ab..00000000000 --- a/docs/fr/sql-reference/data-types/special-data-types/interval.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 61 -toc_title: Intervalle ---- - -# Intervalle {#data-type-interval} - -Famille de types de données représentant des intervalles d'heure et de date. Les types de la [INTERVAL](../../../sql-reference/operators/index.md#operator-interval) opérateur. - -!!! warning "Avertissement" - `Interval` les valeurs de type de données ne peuvent pas être stockées dans les tables. - -Structure: - -- Intervalle de temps en tant que valeur entière non signée. -- Type de l'intervalle. - -Types d'intervalles pris en charge: - -- `SECOND` -- `MINUTE` -- `HOUR` -- `DAY` -- `WEEK` -- `MONTH` -- `QUARTER` -- `YEAR` - -Pour chaque type d'intervalle, il existe un type de données distinct. Par exemple, l' `DAY` l'intervalle correspond au `IntervalDay` type de données: - -``` sql -SELECT toTypeName(INTERVAL 4 DAY) -``` - -``` text -┌─toTypeName(toIntervalDay(4))─┐ -│ IntervalDay │ -└──────────────────────────────┘ -``` - -## Utilisation Remarques {#data-type-interval-usage-remarks} - -Vous pouvez utiliser `Interval`-tapez des valeurs dans des opérations arithmétiques avec [Date](../../../sql-reference/data-types/date.md) et [DateTime](../../../sql-reference/data-types/datetime.md)-type de valeurs. Par exemple, vous pouvez ajouter 4 jours à l'heure actuelle: - -``` sql -SELECT now() as current_date_time, current_date_time + INTERVAL 4 DAY -``` - -``` text -┌───current_date_time─┬─plus(now(), toIntervalDay(4))─┐ -│ 2019-10-23 10:58:45 │ 2019-10-27 10:58:45 │ -└─────────────────────┴───────────────────────────────┘ -``` - -Les intervalles avec différents types ne peuvent pas être combinés. Vous ne pouvez pas utiliser des intervalles comme `4 DAY 1 HOUR`. Spécifiez des intervalles en unités inférieures ou égales à la plus petite unité de l'intervalle, par exemple, l'intervalle `1 day and an hour` l'intervalle peut être exprimée comme `25 HOUR` ou `90000 SECOND`. - -Vous ne pouvez pas effectuer d'opérations arithmétiques avec `Interval`- tapez des valeurs, mais vous pouvez ajouter des intervalles de différents types par conséquent aux valeurs dans `Date` ou `DateTime` types de données. Exemple: - -``` sql -SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR -``` - -``` text -┌───current_date_time─┬─plus(plus(now(), toIntervalDay(4)), toIntervalHour(3))─┐ -│ 2019-10-23 11:16:28 │ 2019-10-27 14:16:28 │ -└─────────────────────┴────────────────────────────────────────────────────────┘ -``` - -La requête suivante provoque une exception: - -``` sql -select now() AS current_date_time, current_date_time + (INTERVAL 4 DAY + INTERVAL 3 HOUR) -``` - -``` text -Received exception from server (version 19.14.1): -Code: 43. DB::Exception: Received from localhost:9000. DB::Exception: Wrong argument types for function plus: if one argument is Interval, then another must be Date or DateTime.. 
-``` - -## Voir Aussi {#see-also} - -- [INTERVAL](../../../sql-reference/operators/index.md#operator-interval) opérateur -- [toInterval](../../../sql-reference/functions/type-conversion-functions.md#function-tointerval) type fonctions de conversion diff --git a/docs/fr/sql-reference/data-types/special-data-types/nothing.md b/docs/fr/sql-reference/data-types/special-data-types/nothing.md deleted file mode 100644 index 2e3d76b7207..00000000000 --- a/docs/fr/sql-reference/data-types/special-data-types/nothing.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 60 -toc_title: Rien ---- - -# Rien {#nothing} - -Le seul but de ce type de données est de représenter les cas où une valeur n'est pas prévu. Donc vous ne pouvez pas créer un `Nothing` type de valeur. - -Par exemple, littéral [NULL](../../../sql-reference/syntax.md#null-literal) a type de `Nullable(Nothing)`. Voir plus sur [Nullable](../../../sql-reference/data-types/nullable.md). - -Le `Nothing` type peut également être utilisé pour désigner des tableaux vides: - -``` sql -SELECT toTypeName(array()) -``` - -``` text -┌─toTypeName(array())─┐ -│ Array(Nothing) │ -└─────────────────────┘ -``` - -[Article Original](https://clickhouse.tech/docs/en/data_types/special_data_types/nothing/) diff --git a/docs/fr/sql-reference/data-types/special-data-types/set.md b/docs/fr/sql-reference/data-types/special-data-types/set.md deleted file mode 100644 index 8f50175bb6b..00000000000 --- a/docs/fr/sql-reference/data-types/special-data-types/set.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 59 -toc_title: "D\xE9finir" ---- - -# Définir {#set} - -Utilisé pour la moitié droite d'un [IN](../../operators/in.md#select-in-operators) expression. - -[Article Original](https://clickhouse.tech/docs/en/data_types/special_data_types/set/) diff --git a/docs/fr/sql-reference/data-types/string.md b/docs/fr/sql-reference/data-types/string.md deleted file mode 100644 index b82e1fe6c69..00000000000 --- a/docs/fr/sql-reference/data-types/string.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 44 -toc_title: "Cha\xEEne" ---- - -# Chaîne {#string} - -Les chaînes d'une longueur arbitraire. La longueur n'est pas limitée. La valeur peut contenir un ensemble arbitraire d'octets, y compris des octets nuls. -Le type de chaîne remplace les types VARCHAR, BLOB, CLOB et autres provenant d'autres SGBD. - -## Encodage {#encodings} - -ClickHouse n'a pas le concept d'encodages. Les chaînes peuvent contenir un ensemble arbitraire d'octets, qui sont stockés et sortis tels quels. -Si vous avez besoin de stocker des textes, nous vous recommandons d'utiliser L'encodage UTF-8. À tout le moins, si votre terminal utilise UTF-8 (comme recommandé), vous pouvez lire et écrire vos valeurs sans effectuer de conversions. -De même, certaines fonctions pour travailler avec des chaînes ont des variations distinctes qui fonctionnent sous l'hypothèse que la chaîne contient un ensemble d'octets représentant un texte codé en UTF-8. -Par exemple, l' ‘length’ fonction calcule la longueur de la chaîne en octets, tandis que le ‘lengthUTF8’ la fonction calcule la longueur de la chaîne en points de code Unicode, en supposant que la valeur est encodée en UTF-8. 
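A small sketch of the difference (assuming a UTF-8 terminal; 'é' occupies two bytes but one code point):

``` sql
SELECT length('héllo') AS bytes, lengthUTF8('héllo') AS code_points
-- bytes = 6, code_points = 5
```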
- -[Article Original](https://clickhouse.tech/docs/en/data_types/string/) diff --git a/docs/fr/sql-reference/data-types/tuple.md b/docs/fr/sql-reference/data-types/tuple.md deleted file mode 100644 index ab9db735181..00000000000 --- a/docs/fr/sql-reference/data-types/tuple.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 53 -toc_title: Tuple (T1, T2,...) ---- - -# Tuple(t1, T2, …) {#tuplet1-t2} - -Un n-uplet d'éléments, chacun ayant une personne [type](index.md#data_types). - -Les Tuples sont utilisés pour le regroupement temporaire de colonnes. Les colonnes peuvent être regroupées lorsqu'une expression IN est utilisée dans une requête et pour spécifier certains paramètres formels des fonctions lambda. Pour plus d'informations, voir les sections [Dans les opérateurs](../../sql-reference/operators/in.md) et [Des fonctions d'ordre supérieur](../../sql-reference/functions/higher-order-functions.md). - -Les Tuples peuvent être le résultat d'une requête. Dans ce cas, pour les formats de texte autres que JSON, les valeurs sont séparées par des virgules entre parenthèses. Dans les formats JSON, les tuples sont sortis sous forme de tableaux (entre crochets). - -## La création d'un Tuple {#creating-a-tuple} - -Vous pouvez utiliser une fonction pour créer un tuple: - -``` sql -tuple(T1, T2, ...) -``` - -Exemple de création d'un tuple: - -``` sql -SELECT tuple(1,'a') AS x, toTypeName(x) -``` - -``` text -┌─x───────┬─toTypeName(tuple(1, 'a'))─┐ -│ (1,'a') │ Tuple(UInt8, String) │ -└─────────┴───────────────────────────┘ -``` - -## Utilisation de Types de données {#working-with-data-types} - -Lors de la création d'un tuple à la volée, ClickHouse détecte automatiquement le type de chaque argument comme le minimum des types qui peuvent stocker la valeur de l'argument. Si l'argument est [NULL](../../sql-reference/syntax.md#null-literal) le type de l'élément tuple est [Nullable](nullable.md). - -Exemple de détection automatique de type de données: - -``` sql -SELECT tuple(1, NULL) AS x, toTypeName(x) -``` - -``` text -┌─x────────┬─toTypeName(tuple(1, NULL))──────┐ -│ (1,NULL) │ Tuple(UInt8, Nullable(Nothing)) │ -└──────────┴─────────────────────────────────┘ -``` - -[Article Original](https://clickhouse.tech/docs/en/data_types/tuple/) diff --git a/docs/fr/sql-reference/data-types/uuid.md b/docs/fr/sql-reference/data-types/uuid.md deleted file mode 100644 index 60973a3f855..00000000000 --- a/docs/fr/sql-reference/data-types/uuid.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 46 -toc_title: UUID ---- - -# UUID {#uuid-data-type} - -Un identifiant unique universel (UUID) est un numéro de 16 octets utilisé pour identifier les enregistrements. Pour plus d'informations sur L'UUID, voir [Wikipedia](https://en.wikipedia.org/wiki/Universally_unique_identifier). - -L'exemple de valeur de type UUID est représenté ci-dessous: - -``` text -61f0c404-5cb3-11e7-907b-a6006ad3dba0 -``` - -Si vous ne spécifiez pas la valeur de la colonne UUID lors de l'insertion d'un nouvel enregistrement, la valeur UUID est remplie avec zéro: - -``` text -00000000-0000-0000-0000-000000000000 -``` - -## Comment générer {#how-to-generate} - -Pour générer la valeur UUID, ClickHouse fournit [generateUUIDv4](../../sql-reference/functions/uuid-functions.md) fonction. 
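-
-Esquisse minimale d'un appel (la valeur retournée est aléatoire ; celle montrée ci-dessous n'est qu'illustrative) :
-
-``` sql
-SELECT generateUUIDv4() AS uuid
-```
-
-``` text
-┌─uuid─────────────────────────────────┐
-│ f4bf890f-f9dc-4332-ad5c-0c18e73f28e9 │
-└──────────────────────────────────────┘
-```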
- -## Exemple D'Utilisation {#usage-example} - -**Exemple 1** - -Cet exemple montre la création d'une table avec la colonne de type UUID et l'insertion d'une valeur dans la table. - -``` sql -CREATE TABLE t_uuid (x UUID, y String) ENGINE=TinyLog -``` - -``` sql -INSERT INTO t_uuid SELECT generateUUIDv4(), 'Example 1' -``` - -``` sql -SELECT * FROM t_uuid -``` - -``` text -┌────────────────────────────────────x─┬─y─────────┐ -│ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │ -└──────────────────────────────────────┴───────────┘ -``` - -**Exemple 2** - -Dans cet exemple, la valeur de la colonne UUID n'est pas spécifiée lors de l'insertion d'un nouvel enregistrement. - -``` sql -INSERT INTO t_uuid (y) VALUES ('Example 2') -``` - -``` sql -SELECT * FROM t_uuid -``` - -``` text -┌────────────────────────────────────x─┬─y─────────┐ -│ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │ -│ 00000000-0000-0000-0000-000000000000 │ Example 2 │ -└──────────────────────────────────────┴───────────┘ -``` - -## Restriction {#restrictions} - -Le type de données UUID ne prend en charge que les fonctions qui [Chaîne](string.md) type de données prend également en charge (par exemple, [min](../../sql-reference/aggregate-functions/reference.md#agg_function-min), [Max](../../sql-reference/aggregate-functions/reference.md#agg_function-max), et [compter](../../sql-reference/aggregate-functions/reference.md#agg_function-count)). - -Le type de données UUID n'est pas pris en charge par les opérations arithmétiques (par exemple, [ABS](../../sql-reference/functions/arithmetic-functions.md#arithm_func-abs)) ou des fonctions d'agrégation, comme [somme](../../sql-reference/aggregate-functions/reference.md#agg_function-sum) et [avg](../../sql-reference/aggregate-functions/reference.md#agg_function-avg). - -[Article Original](https://clickhouse.tech/docs/en/data_types/uuid/) diff --git a/docs/fr/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md b/docs/fr/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md deleted file mode 100644 index cc238f02f3a..00000000000 --- a/docs/fr/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 45 -toc_title: "Dictionnaires hi\xE9rarchiques" ---- - -# Dictionnaires Hiérarchiques {#hierarchical-dictionaries} - -Clickhouse prend en charge les dictionnaires hiérarchiques avec un [touche numérique](external-dicts-dict-structure.md#ext_dict-numeric-key). - -Voici une structure hiérarchique: - -``` text -0 (Common parent) -│ -├── 1 (Russia) -│ │ -│ └── 2 (Moscow) -│ │ -│ └── 3 (Center) -│ -└── 4 (Great Britain) - │ - └── 5 (London) -``` - -Cette hiérarchie peut être exprimée comme la table de dictionnaire suivante. - -| id_région | région_parent | nom_région | -|------------|----------------|--------------------| -| 1 | 0 | Russie | -| 2 | 1 | Moscou | -| 3 | 2 | Center | -| 4 | 0 | La Grande-Bretagne | -| 5 | 4 | Londres | - -Ce tableau contient une colonne `parent_region` qui contient la clé du parent le plus proche de l'élément. - -Clickhouse soutient le [hiérarchique](external-dicts-dict-structure.md#hierarchical-dict-attr) propriété pour [externe dictionnaire](index.md) attribut. Cette propriété vous permet de configurer le dictionnaire hiérarchique comme décrit ci-dessus. 
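-
-À titre d'illustration, esquisse d'une requête sur la hiérarchie ci-dessus à l'aide de la fonction `dictGetHierarchy` décrite juste en dessous (le nom de dictionnaire `regions_dict` est hypothétique) :
-
-``` sql
--- Chaîne des parents de Center (id = 3) : Center -> Moscow -> Russia
-SELECT dictGetHierarchy('regions_dict', toUInt64(3)) AS chain
-```
-
-``` text
-┌─chain───┐
-│ [3,2,1] │
-└─────────┘
-```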
- -Le [dictGetHierarchy](../../../sql-reference/functions/ext-dict-functions.md#dictgethierarchy) la fonction vous permet d'obtenir la chaîne parent d'un élément. - -Pour notre exemple, la structure du dictionnaire peut être la suivante: - -``` xml - - - - region_id - - - - parent_region - UInt64 - 0 - true - - - - region_name - String - - - - - -``` - -[Article Original](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_hierarchical/) diff --git a/docs/fr/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md b/docs/fr/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md deleted file mode 100644 index 2569329fefd..00000000000 --- a/docs/fr/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md +++ /dev/null @@ -1,407 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 41 -toc_title: "Stockage des dictionnaires en m\xE9moire" ---- - -# Stockage des dictionnaires en mémoire {#dicts-external-dicts-dict-layout} - -Il existe une variété de façons de stocker les dictionnaires en mémoire. - -Nous vous recommandons [plat](#flat), [haché](#dicts-external_dicts_dict_layout-hashed) et [complex_key_hashed](#complex-key-hashed). qui fournissent la vitesse de traitement optimale. - -La mise en cache n'est pas recommandée en raison de performances potentiellement médiocres et de difficultés à sélectionner les paramètres optimaux. En savoir plus dans la section “[cache](#cache)”. - -Il existe plusieurs façons d'améliorer les performances du dictionnaire: - -- Appelez la fonction pour travailler avec le dictionnaire après `GROUP BY`. -- Marquer les attributs à extraire comme injectifs. Un attribut est appelé injectif si différentes valeurs d'attribut correspondent à différentes clés. Alors, quand `GROUP BY` utilise une fonction qui récupère une valeur d'attribut par la clé, cette fonction est automatiquement retirée de `GROUP BY`. - -ClickHouse génère une exception pour les erreurs avec les dictionnaires. Des exemples d'erreurs: - -- Le dictionnaire accessible n'a pas pu être chargé. -- Erreur de la requête d'une `cached` dictionnaire. - -Vous pouvez afficher la liste des dictionnaires externes et leurs statuts dans le `system.dictionaries` table. - -La configuration ressemble à ceci: - -``` xml - - - ... - - - - - - ... - - -``` - -Correspondant [DDL-requête](../../statements/create.md#create-dictionary-query): - -``` sql -CREATE DICTIONARY (...) -... -LAYOUT(LAYOUT_TYPE(param value)) -- layout settings -... -``` - -## Façons de stocker des dictionnaires en mémoire {#ways-to-store-dictionaries-in-memory} - -- [plat](#flat) -- [haché](#dicts-external_dicts_dict_layout-hashed) -- [sparse_hashed](#dicts-external_dicts_dict_layout-sparse_hashed) -- [cache](#cache) -- [direct](#direct) -- [range_hashed](#range-hashed) -- [complex_key_hashed](#complex-key-hashed) -- [complex_key_cache](#complex-key-cache) -- [complex_key_direct](#complex-key-direct) -- [ip_trie](#ip-trie) - -### plat {#flat} - -Le dictionnaire est complètement stocké en mémoire sous la forme de tableaux plats. Combien de mémoire le dictionnaire utilise-t-il? Le montant est proportionnel à la taille de la plus grande clé (dans l'espace). - -La clé du dictionnaire a le `UInt64` type et la valeur est limitée à 500 000. Si une clé plus grande est découverte lors de la création du dictionnaire, ClickHouse lève une exception et ne crée pas le dictionnaire. 
- -Tous les types de sources sont pris en charge. Lors de la mise à jour, les données (à partir d'un fichier ou d'une table) sont lues dans leur intégralité. - -Cette méthode fournit les meilleures performances parmi toutes les méthodes disponibles de stockage du dictionnaire. - -Exemple de Configuration: - -``` xml - - - -``` - -ou - -``` sql -LAYOUT(FLAT()) -``` - -### haché {#dicts-external_dicts_dict_layout-hashed} - -Le dictionnaire est entièrement stockée en mémoire sous la forme d'une table de hachage. Le dictionnaire peut contenir n'importe quel nombre d'éléments avec tous les identificateurs Dans la pratique, le nombre de clés peut atteindre des dizaines de millions d'articles. - -Tous les types de sources sont pris en charge. Lors de la mise à jour, les données (à partir d'un fichier ou d'une table) sont lues dans leur intégralité. - -Exemple de Configuration: - -``` xml - - - -``` - -ou - -``` sql -LAYOUT(HASHED()) -``` - -### sparse_hashed {#dicts-external_dicts_dict_layout-sparse_hashed} - -Semblable à `hashed`, mais utilise moins de mémoire en faveur de plus D'utilisation du processeur. - -Exemple de Configuration: - -``` xml - - - -``` - -``` sql -LAYOUT(SPARSE_HASHED()) -``` - -### complex_key_hashed {#complex-key-hashed} - -Ce type de stockage est pour une utilisation avec composite [touches](external-dicts-dict-structure.md). Semblable à `hashed`. - -Exemple de Configuration: - -``` xml - - - -``` - -``` sql -LAYOUT(COMPLEX_KEY_HASHED()) -``` - -### range_hashed {#range-hashed} - -Le dictionnaire est stocké en mémoire sous la forme d'une table de hachage avec un tableau ordonné de gammes et leurs valeurs correspondantes. - -Cette méthode de stockage fonctionne de la même manière que hachée et permet d'utiliser des plages de date / heure (Type numérique arbitraire) en plus de la clé. - -Exemple: Le tableau contient des réductions pour chaque annonceur dans le format: - -``` text -+---------|-------------|-------------|------+ -| advertiser id | discount start date | discount end date | amount | -+===============+=====================+===================+========+ -| 123 | 2015-01-01 | 2015-01-15 | 0.15 | -+---------|-------------|-------------|------+ -| 123 | 2015-01-16 | 2015-01-31 | 0.25 | -+---------|-------------|-------------|------+ -| 456 | 2015-01-01 | 2015-01-15 | 0.05 | -+---------|-------------|-------------|------+ -``` - -Pour utiliser un échantillon pour les plages de dates, définissez `range_min` et `range_max` éléments dans le [structure](external-dicts-dict-structure.md). Ces éléments doivent contenir des éléments `name` et`type` (si `type` n'est pas spécifié, le type par défaut sera utilisé-Date). `type` peut être n'importe quel type numérique (Date / DateTime / UInt64 / Int32 / autres). - -Exemple: - -``` xml - - - Id - - - first - Date - - - last - Date - - ... -``` - -ou - -``` sql -CREATE DICTIONARY somedict ( - id UInt64, - first Date, - last Date -) -PRIMARY KEY id -LAYOUT(RANGE_HASHED()) -RANGE(MIN first MAX last) -``` - -Pour travailler avec ces dictionnaires, vous devez passer un argument supplémentaire à l' `dictGetT` fonction, pour laquelle une plage est sélectionnée: - -``` sql -dictGetT('dict_name', 'attr_name', id, date) -``` - -Cette fonction retourne la valeur pour l' `id`s et la plage de dates qui inclut la date passée. - -Détails de l'algorithme: - -- Si l' `id` est introuvable ou une plage n'est pas trouvé pour l' `id` il retourne la valeur par défaut pour le dictionnaire. 
-- S'il y a des plages qui se chevauchent, vous pouvez en utiliser. -- Si le délimiteur est `NULL` ou une date non valide (telle que 1900-01-01 ou 2039-01-01), la plage est laissée ouverte. La gamme peut être ouverte des deux côtés. - -Exemple de Configuration: - -``` xml - - - - ... - - - - - - - - Abcdef - - - StartTimeStamp - UInt64 - - - EndTimeStamp - UInt64 - - - XXXType - String - - - - - - -``` - -ou - -``` sql -CREATE DICTIONARY somedict( - Abcdef UInt64, - StartTimeStamp UInt64, - EndTimeStamp UInt64, - XXXType String DEFAULT '' -) -PRIMARY KEY Abcdef -RANGE(MIN StartTimeStamp MAX EndTimeStamp) -``` - -### cache {#cache} - -Le dictionnaire est stocké dans un cache qui a un nombre fixe de cellules. Ces cellules contiennent des éléments fréquemment utilisés. - -Lors de la recherche d'un dictionnaire, le cache est recherché en premier. Pour chaque bloc de données, toutes les clés qui ne sont pas trouvées dans le cache ou qui sont obsolètes sont demandées à la source en utilisant `SELECT attrs... FROM db.table WHERE id IN (k1, k2, ...)`. Les données reçues sont ensuite écrites dans le cache. - -Pour les dictionnaires de cache, l'expiration [vie](external-dicts-dict-lifetime.md) des données dans le cache peuvent être définies. Si plus de temps que `lifetime` passé depuis le chargement des données dans une cellule, la valeur de la cellule n'est pas utilisée et elle est demandée à nouveau la prochaine fois qu'elle doit être utilisée. -C'est la moins efficace de toutes les façons de stocker les dictionnaires. La vitesse du cache dépend fortement des paramètres corrects et que le scénario d'utilisation. Un dictionnaire de type de cache fonctionne bien uniquement lorsque les taux de réussite sont suffisamment élevés (recommandé 99% et plus). Vous pouvez afficher le taux de réussite moyen dans le `system.dictionaries` table. - -Pour améliorer les performances du cache, utilisez une sous-requête avec `LIMIT`, et appelez la fonction avec le dictionnaire en externe. - -Soutenu [source](external-dicts-dict-sources.md): MySQL, ClickHouse, exécutable, HTTP. - -Exemple de paramètres: - -``` xml - - - - 1000000000 - - -``` - -ou - -``` sql -LAYOUT(CACHE(SIZE_IN_CELLS 1000000000)) -``` - -Définissez une taille de cache suffisamment grande. Vous devez expérimenter pour sélectionner le nombre de cellules: - -1. Définissez une valeur. -2. Exécutez les requêtes jusqu'à ce que le cache soit complètement plein. -3. Évaluer la consommation de mémoire en utilisant le `system.dictionaries` table. -4. Augmentez ou diminuez le nombre de cellules jusqu'à ce que la consommation de mémoire requise soit atteinte. - -!!! warning "Avertissement" - N'utilisez pas ClickHouse comme source, car le traitement des requêtes avec des lectures aléatoires est lent. - -### complex_key_cache {#complex-key-cache} - -Ce type de stockage est pour une utilisation avec composite [touches](external-dicts-dict-structure.md). Semblable à `cache`. - -### direct {#direct} - -Le dictionnaire n'est pas stocké dans la mémoire et va directement à la source, pendant le traitement d'une demande. - -La clé du dictionnaire a le `UInt64` type. - -Tous les types de [source](external-dicts-dict-sources.md), sauf les fichiers locaux, sont pris en charge. 
- -Exemple de Configuration: - -``` xml - - - -``` - -ou - -``` sql -LAYOUT(DIRECT()) -``` - -### complex_key_direct {#complex-key-direct} - -Ce type de stockage est destiné à être utilisé avec des [clés](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md) composites. Similaire à `direct` - -### ip_trie {#ip-trie} - -Ce type de stockage permet de mapper des préfixes de réseau (adresses IP) à des métadonnées telles que ASN. - -Exemple: la table contient les préfixes de réseau et leur correspondant en tant que numéro et Code de pays: - -``` text - +-----------|-----|------+ - | prefix | asn | cca2 | - +=================+=======+========+ - | 202.79.32.0/20 | 17501 | NP | - +-----------|-----|------+ - | 2620:0:870::/48 | 3856 | US | - +-----------|-----|------+ - | 2a02:6b8:1::/48 | 13238 | RU | - +-----------|-----|------+ - | 2001:db8::/32 | 65536 | ZZ | - +-----------|-----|------+ -``` - -Lorsque vous utilisez ce type de mise en page, la structure doit avoir une clé composite. - -Exemple: - -``` xml - - - - prefix - String - - - - asn - UInt32 - - - - cca2 - String - ?? - - ... - - - - true - - -``` - -ou - -``` sql -CREATE DICTIONARY somedict ( - prefix String, - asn UInt32, - cca2 String DEFAULT '??' -) -PRIMARY KEY prefix -``` - -La clé ne doit avoir qu'un seul attribut de type chaîne contenant un préfixe IP autorisé. Les autres types ne sont pas encore pris en charge. - -Pour les requêtes, vous devez utiliser les mêmes fonctions (`dictGetT` avec un n-uplet) comme pour les dictionnaires avec des clés composites: - -``` sql -dictGetT('dict_name', 'attr_name', tuple(ip)) -``` - -La fonction prend soit `UInt32` pour IPv4, ou `FixedString(16)` pour IPv6: - -``` sql -dictGetString('prefix', 'asn', tuple(IPv6StringToNum('2001:db8::1'))) -``` - -Les autres types ne sont pas encore pris en charge. La fonction renvoie l'attribut du préfixe correspondant à cette adresse IP. S'il y a chevauchement des préfixes, le plus spécifique est retourné. - -Les données doit complètement s'intégrer dans la RAM. - -[Article Original](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_layout/) diff --git a/docs/fr/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/fr/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md deleted file mode 100644 index 8ce78919ff1..00000000000 --- a/docs/fr/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 42 -toc_title: "Mises \xC0 Jour Du Dictionnaire" ---- - -# Mises À Jour Du Dictionnaire {#dictionary-updates} - -ClickHouse met périodiquement à jour les dictionnaires. L'intervalle de mise à jour pour les dictionnaires entièrement téléchargés et l'intervalle d'invalidation pour les dictionnaires `` tag en quelques secondes. - -Les mises à jour du dictionnaire (autres que le chargement pour la première utilisation) ne bloquent pas les requêtes. Lors des mises à jour, l'ancienne version d'un dictionnaire est utilisée. Si une erreur se produit pendant une mise à jour, l'erreur est écrite dans le journal du serveur et les requêtes continuent d'utiliser l'ancienne version des dictionnaires. - -Exemple de paramètres: - -``` xml - - ... - 300 - ... - -``` - -``` sql -CREATE DICTIONARY (...) -... -LIFETIME(300) -... 
-``` - -Paramètre `0` (`LIFETIME(0)`) empêche la mise à jour des dictionnaires. - -Vous pouvez définir un intervalle de temps pour les mises à niveau, et ClickHouse choisira un temps uniformément aléatoire dans cette plage. Ceci est nécessaire pour répartir la charge sur la source du dictionnaire lors de la mise à niveau sur un grand nombre de serveurs. - -Exemple de paramètres: - -``` xml - - ... - - 300 - 360 - - ... - -``` - -ou - -``` sql -LIFETIME(MIN 300 MAX 360) -``` - -Si `0` et `0`, ClickHouse ne recharge pas le dictionnaire par timeout. -Dans ce cas, ClickHouse peut recharger le dictionnaire plus tôt si le fichier de configuration du dictionnaire a été `SYSTEM RELOAD DICTIONARY` la commande a été exécutée. - -Lors de la mise à niveau des dictionnaires, le serveur ClickHouse applique une logique différente selon le type de [source](external-dicts-dict-sources.md): - -Lors de la mise à niveau des dictionnaires, le serveur ClickHouse applique une logique différente selon le type de [source](external-dicts-dict-sources.md): - -- Pour un fichier texte, il vérifie l'heure de la modification. Si l'heure diffère de l'heure enregistrée précédemment, le dictionnaire est mis à jour. -- Pour les tables MyISAM, l'Heure de modification est vérifiée à l'aide d'un `SHOW TABLE STATUS` requête. -- Les dictionnaires d'autres sources sont mis à jour à chaque fois par défaut. - -Pour les sources MySQL (InnoDB), ODBC et ClickHouse, vous pouvez configurer une requête qui mettra à jour les dictionnaires uniquement s'ils ont vraiment changé, plutôt que chaque fois. Pour ce faire, suivez ces étapes: - -- La table de dictionnaire doit avoir un champ qui change toujours lorsque les données source sont mises à jour. -- Les paramètres de la source doivent spécifier une requête qui récupère le champ de modification. Le serveur ClickHouse interprète le résultat de la requête comme une ligne, et si cette ligne a changé par rapport à son état précédent, le dictionnaire est mis à jour. Spécifier la requête dans le `` champ dans les paramètres pour le [source](external-dicts-dict-sources.md). - -Exemple de paramètres: - -``` xml - - ... - - ... - SELECT update_time FROM dictionary_source where id = 1 - - ... - -``` - -ou - -``` sql -... -SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source where id = 1')) -... -``` - -[Article Original](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_lifetime/) diff --git a/docs/fr/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/fr/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md deleted file mode 100644 index 4c608fa7188..00000000000 --- a/docs/fr/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ /dev/null @@ -1,630 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 43 -toc_title: Sources de dictionnaires externes ---- - -# Sources de dictionnaires externes {#dicts-external-dicts-dict-sources} - -Externe dictionnaire peut être connecté à partir de nombreuses sources différentes. - -Si dictionary est configuré à l'aide de xml-file, la configuration ressemble à ceci: - -``` xml - - - ... - - - - - - ... - - ... - -``` - -En cas de [DDL-requête](../../statements/create.md#create-dictionary-query), configuration égale ressemblera à: - -``` sql -CREATE DICTIONARY dict_name (...) -... -SOURCE(SOURCE_TYPE(param1 val1 ... paramN valN)) -- Source configuration -... 
-``` - -La source est configurée dans le `source` section. - -Pour les types de source [Fichier Local](#dicts-external_dicts_dict_sources-local_file), [Fichier exécutable](#dicts-external_dicts_dict_sources-executable), [HTTP(S)](#dicts-external_dicts_dict_sources-http), [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse) -les paramètres optionnels sont disponibles: - -``` xml - - - /opt/dictionaries/os.tsv - TabSeparated - - - 0 - - -``` - -ou - -``` sql -SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated')) -SETTINGS(format_csv_allow_single_quotes = 0) -``` - -Les Types de sources (`source_type`): - -- [Fichier Local](#dicts-external_dicts_dict_sources-local_file) -- [Fichier exécutable](#dicts-external_dicts_dict_sources-executable) -- [HTTP(S)](#dicts-external_dicts_dict_sources-http) -- DBMS - - [ODBC](#dicts-external_dicts_dict_sources-odbc) - - [MySQL](#dicts-external_dicts_dict_sources-mysql) - - [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse) - - [MongoDB](#dicts-external_dicts_dict_sources-mongodb) - - [Redis](#dicts-external_dicts_dict_sources-redis) - -## Fichier Local {#dicts-external_dicts_dict_sources-local_file} - -Exemple de paramètres: - -``` xml - - - /opt/dictionaries/os.tsv - TabSeparated - - -``` - -ou - -``` sql -SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated')) -``` - -Définition des champs: - -- `path` – The absolute path to the file. -- `format` – The file format. All the formats described in “[Format](../../../interfaces/formats.md#formats)” sont pris en charge. - -## Fichier Exécutable {#dicts-external_dicts_dict_sources-executable} - -Travailler avec des fichiers exécutables en dépend [comment le dictionnaire est stocké dans la mémoire](external-dicts-dict-layout.md). Si le dictionnaire est stocké en utilisant `cache` et `complex_key_cache`, Clickhouse demande les clés nécessaires en envoyant une requête au STDIN du fichier exécutable. Sinon, ClickHouse démarre le fichier exécutable et traite sa sortie comme des données de dictionnaire. - -Exemple de paramètres: - -``` xml - - - cat /opt/dictionaries/os.tsv - TabSeparated - - -``` - -ou - -``` sql -SOURCE(EXECUTABLE(command 'cat /opt/dictionaries/os.tsv' format 'TabSeparated')) -``` - -Définition des champs: - -- `command` – The absolute path to the executable file, or the file name (if the program directory is written to `PATH`). -- `format` – The file format. All the formats described in “[Format](../../../interfaces/formats.md#formats)” sont pris en charge. - -## Http(s) {#dicts-external_dicts_dict_sources-http} - -Travailler avec un serveur HTTP (S) dépend de [comment le dictionnaire est stocké dans la mémoire](external-dicts-dict-layout.md). Si le dictionnaire est stocké en utilisant `cache` et `complex_key_cache`, Clickhouse demande les clés nécessaires en envoyant une demande via le `POST` méthode. - -Exemple de paramètres: - -``` xml - - - http://[::1]/os.tsv - TabSeparated - - user - password - - -
-            <name>API-KEY</name>
-            <value>key</value>
-        </header>
-    </headers>
- -``` - -ou - -``` sql -SOURCE(HTTP( - url 'http://[::1]/os.tsv' - format 'TabSeparated' - credentials(user 'user' password 'password') - headers(header(name 'API-KEY' value 'key')) -)) -``` - -Pour que ClickHouse accède à une ressource HTTPS, vous devez [configurer openSSL](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-openssl) dans la configuration du serveur. - -Définition des champs: - -- `url` – The source URL. -- `format` – The file format. All the formats described in “[Format](../../../interfaces/formats.md#formats)” sont pris en charge. -- `credentials` – Basic HTTP authentication. Optional parameter. - - `user` – Username required for the authentication. - - `password` – Password required for the authentication. -- `headers` – All custom HTTP headers entries used for the HTTP request. Optional parameter. - - `header` – Single HTTP header entry. - - `name` – Identifiant name used for the header send on the request. - - `value` – Value set for a specific identifiant name. - -## ODBC {#dicts-external_dicts_dict_sources-odbc} - -Vous pouvez utiliser cette méthode pour connecter n'importe quelle base de données dotée d'un pilote ODBC. - -Exemple de paramètres: - -``` xml - - - DatabaseName - ShemaName.TableName
-            <connection_string>DSN=some_parameters</connection_string>
-            <invalidate_query>SQL_QUERY</invalidate_query>
-        </odbc>
- -``` - -ou - -``` sql -SOURCE(ODBC( - db 'DatabaseName' - table 'SchemaName.TableName' - connection_string 'DSN=some_parameters' - invalidate_query 'SQL_QUERY' -)) -``` - -Définition des champs: - -- `db` – Name of the database. Omit it if the database name is set in the `` paramètre. -- `table` – Name of the table and schema if exists. -- `connection_string` – Connection string. -- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Mise à jour des dictionnaires](external-dicts-dict-lifetime.md). - -ClickHouse reçoit des symboles de citation D'ODBC-driver et cite tous les paramètres des requêtes au pilote, il est donc nécessaire de définir le nom de la table en conséquence sur le cas du nom de la table dans la base de données. - -Si vous avez des problèmes avec des encodages lors de l'utilisation d'Oracle, consultez le [FAQ](../../../faq/general.md#oracle-odbc-encodings) article. - -### Vulnérabilité connue de la fonctionnalité du dictionnaire ODBC {#known-vulnerability-of-the-odbc-dictionary-functionality} - -!!! attention "Attention" - Lors de la connexion à la base de données via le paramètre de connexion du pilote ODBC `Servername` peut être substitué. Dans ce cas, les valeurs de `USERNAME` et `PASSWORD` de `odbc.ini` sont envoyés au serveur distant et peuvent être compromis. - -**Exemple d'utilisation non sécurisée** - -Configurons unixODBC pour PostgreSQL. Le contenu de `/etc/odbc.ini`: - -``` text -[gregtest] -Driver = /usr/lib/psqlodbca.so -Servername = localhost -PORT = 5432 -DATABASE = test_db -#OPTION = 3 -USERNAME = test -PASSWORD = test -``` - -Si vous faites alors une requête telle que - -``` sql -SELECT * FROM odbc('DSN=gregtest;Servername=some-server.com', 'test_db'); -``` - -Le pilote ODBC enverra des valeurs de `USERNAME` et `PASSWORD` de `odbc.ini` de `some-server.com`. - -### Exemple de connexion Postgresql {#example-of-connecting-postgresql} - -Ubuntu OS. - -Installation d'unixODBC et du pilote ODBC pour PostgreSQL: - -``` bash -$ sudo apt-get install -y unixodbc odbcinst odbc-postgresql -``` - -Configuration `/etc/odbc.ini` (ou `~/.odbc.ini`): - -``` text - [DEFAULT] - Driver = myconnection - - [myconnection] - Description = PostgreSQL connection to my_db - Driver = PostgreSQL Unicode - Database = my_db - Servername = 127.0.0.1 - UserName = username - Password = password - Port = 5432 - Protocol = 9.3 - ReadOnly = No - RowVersioning = No - ShowSystemTables = No - ConnSettings = -``` - -La configuration du dictionnaire dans ClickHouse: - -``` xml - - - table_name - - - - - DSN=myconnection - postgresql_table
-        </odbc>
-    </source>
-    <lifetime>
-        <min>300</min>
-        <max>360</max>
-    </lifetime>
-    <layout>
-        <hashed/>
-    </layout>
-    <structure>
-        <id>
-            <name>id</name>
-        </id>
-        <attribute>
-            <name>some_column</name>
-            <type>UInt64</type>
-            <null_value>0</null_value>
-        </attribute>
-    </structure>
-</dictionary>
-``` - -ou - -``` sql -CREATE DICTIONARY table_name ( - id UInt64, - some_column UInt64 DEFAULT 0 -) -PRIMARY KEY id -SOURCE(ODBC(connection_string 'DSN=myconnection' table 'postgresql_table')) -LAYOUT(HASHED()) -LIFETIME(MIN 300 MAX 360) -``` - -Vous devrez peut-être modifier `odbc.ini` pour spécifier le chemin d'accès complet à la bibliothèque avec le conducteur `DRIVER=/usr/local/lib/psqlodbcw.so`. - -### Exemple de connexion à MS SQL Server {#example-of-connecting-ms-sql-server} - -Ubuntu OS. - -Installation du pilote: : - -``` bash -$ sudo apt-get install tdsodbc freetds-bin sqsh -``` - -Configuration du pilote: - -``` bash - $ cat /etc/freetds/freetds.conf - ... - - [MSSQL] - host = 192.168.56.101 - port = 1433 - tds version = 7.0 - client charset = UTF-8 - - $ cat /etc/odbcinst.ini - ... - - [FreeTDS] - Description = FreeTDS - Driver = /usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so - Setup = /usr/lib/x86_64-linux-gnu/odbc/libtdsS.so - FileUsage = 1 - UsageCount = 5 - - $ cat ~/.odbc.ini - ... - - [MSSQL] - Description = FreeTDS - Driver = FreeTDS - Servername = MSSQL - Database = test - UID = test - PWD = test - Port = 1433 -``` - -Configuration du dictionnaire dans ClickHouse: - -``` xml - - - test - - - dict
-                <connection_string>DSN=MSSQL;UID=test;PWD=test</connection_string>
-            </odbc>
-        </source>
-
-        <lifetime>
-            <min>300</min>
-            <max>360</max>
-        </lifetime>
-
-        <layout>
-            <flat />
-        </layout>
-
-        <structure>
-            <id>
-                <name>k</name>
-            </id>
-            <attribute>
-                <name>s</name>
-                <type>String</type>
-                <null_value></null_value>
-            </attribute>
-        </structure>
-    </dictionary>
-</yandex>
-``` - -ou - -``` sql -CREATE DICTIONARY test ( - k UInt64, - s String DEFAULT '' -) -PRIMARY KEY k -SOURCE(ODBC(table 'dict' connection_string 'DSN=MSSQL;UID=test;PWD=test')) -LAYOUT(FLAT()) -LIFETIME(MIN 300 MAX 360) -``` - -## DBMS {#dbms} - -### Mysql {#dicts-external_dicts_dict_sources-mysql} - -Exemple de paramètres: - -``` xml - - - 3306 - clickhouse - qwerty - - example01-1 - 1 - - - example01-2 - 1 - - db_name - table_name
-        <where>id=10</where>
-        <invalidate_query>SQL_QUERY</invalidate_query>
-    </mysql>
- -``` - -ou - -``` sql -SOURCE(MYSQL( - port 3306 - user 'clickhouse' - password 'qwerty' - replica(host 'example01-1' priority 1) - replica(host 'example01-2' priority 1) - db 'db_name' - table 'table_name' - where 'id=10' - invalidate_query 'SQL_QUERY' -)) -``` - -Définition des champs: - -- `port` – The port on the MySQL server. You can specify it for all replicas, or for each one individually (inside ``). - -- `user` – Name of the MySQL user. You can specify it for all replicas, or for each one individually (inside ``). - -- `password` – Password of the MySQL user. You can specify it for all replicas, or for each one individually (inside ``). - -- `replica` – Section of replica configurations. There can be multiple sections. - - - `replica/host` – The MySQL host. - - `replica/priority` – The replica priority. When attempting to connect, ClickHouse traverses the replicas in order of priority. The lower the number, the higher the priority. - -- `db` – Name of the database. - -- `table` – Name of the table. - -- `where` – The selection criteria. The syntax for conditions is the same as for `WHERE` clause dans MySQL, par exemple, `id > 10 AND id < 20`. Paramètre facultatif. - -- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Mise à jour des dictionnaires](external-dicts-dict-lifetime.md). - -MySQL peut être connecté sur un hôte local via des sockets. Pour ce faire, définissez `host` et `socket`. - -Exemple de paramètres: - -``` xml - - - localhost - /path/to/socket/file.sock - clickhouse - qwerty - db_name - table_name
-        <where>id=10</where>
-        <invalidate_query>SQL_QUERY</invalidate_query>
-    </mysql>
- -``` - -ou - -``` sql -SOURCE(MYSQL( - host 'localhost' - socket '/path/to/socket/file.sock' - user 'clickhouse' - password 'qwerty' - db 'db_name' - table 'table_name' - where 'id=10' - invalidate_query 'SQL_QUERY' -)) -``` - -### ClickHouse {#dicts-external_dicts_dict_sources-clickhouse} - -Exemple de paramètres: - -``` xml - - - example01-01-1 - 9000 - default - - default - ids
-        <where>id=10</where>
-    </clickhouse>
- -``` - -ou - -``` sql -SOURCE(CLICKHOUSE( - host 'example01-01-1' - port 9000 - user 'default' - password '' - db 'default' - table 'ids' - where 'id=10' -)) -``` - -Définition des champs: - -- `host` – The ClickHouse host. If it is a local host, the query is processed without any network activity. To improve fault tolerance, you can create a [Distribué](../../../engines/table-engines/special/distributed.md) table et entrez-le dans les configurations suivantes. -- `port` – The port on the ClickHouse server. -- `user` – Name of the ClickHouse user. -- `password` – Password of the ClickHouse user. -- `db` – Name of the database. -- `table` – Name of the table. -- `where` – The selection criteria. May be omitted. -- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Mise à jour des dictionnaires](external-dicts-dict-lifetime.md). - -### Mongodb {#dicts-external_dicts_dict_sources-mongodb} - -Exemple de paramètres: - -``` xml - - - localhost - 27017 - - - test - dictionary_source - - -``` - -ou - -``` sql -SOURCE(MONGO( - host 'localhost' - port 27017 - user '' - password '' - db 'test' - collection 'dictionary_source' -)) -``` - -Définition des champs: - -- `host` – The MongoDB host. -- `port` – The port on the MongoDB server. -- `user` – Name of the MongoDB user. -- `password` – Password of the MongoDB user. -- `db` – Name of the database. -- `collection` – Name of the collection. - -### Redis {#dicts-external_dicts_dict_sources-redis} - -Exemple de paramètres: - -``` xml - - - localhost - 6379 - simple - 0 - - -``` - -ou - -``` sql -SOURCE(REDIS( - host 'localhost' - port 6379 - storage_type 'simple' - db_index 0 -)) -``` - -Définition des champs: - -- `host` – The Redis host. -- `port` – The port on the Redis server. -- `storage_type` – The structure of internal Redis storage using for work with keys. `simple` est pour les sources simples et pour les sources à clé unique hachées, `hash_map` est pour les sources hachées avec deux clés. Les sources À Distance et les sources de cache à clé complexe ne sont pas prises en charge. Peut être omis, la valeur par défaut est `simple`. -- `db_index` – The specific numeric index of Redis logical database. May be omitted, default value is 0. - -[Article Original](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_sources/) diff --git a/docs/fr/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md b/docs/fr/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md deleted file mode 100644 index 1b9215baf06..00000000000 --- a/docs/fr/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 44 -toc_title: "Cl\xE9 et champs du dictionnaire" ---- - -# Clé et champs du dictionnaire {#dictionary-key-and-fields} - -Le `` la clause décrit la clé du dictionnaire et les champs disponibles pour les requêtes. - -Description XML: - -``` xml - - - - Id - - - - - - - ... - - - -``` - -Les attributs sont décrits dans les éléments: - -- `` — [La colonne de la clé](external-dicts-dict-structure.md#ext_dict_structure-key). -- `` — [Colonne de données](external-dicts-dict-structure.md#ext_dict_structure-attributes). Il peut y avoir un certain nombre d'attributs. - -Requête DDL: - -``` sql -CREATE DICTIONARY dict_name ( - Id UInt64, - -- attributes -) -PRIMARY KEY Id -... 
-``` - -Les attributs sont décrits dans le corps de la requête: - -- `PRIMARY KEY` — [La colonne de la clé](external-dicts-dict-structure.md#ext_dict_structure-key) -- `AttrName AttrType` — [Colonne de données](external-dicts-dict-structure.md#ext_dict_structure-attributes). Il peut y avoir un certain nombre d'attributs. - -## Clé {#ext_dict_structure-key} - -ClickHouse prend en charge les types de clés suivants: - -- Touche numérique. `UInt64`. Défini dans le `` tag ou en utilisant `PRIMARY KEY` mot. -- Clé Composite. Ensemble de valeurs de types différents. Défini dans la balise `` ou `PRIMARY KEY` mot. - -Une structure xml peut contenir `` ou ``. DDL-requête doit contenir unique `PRIMARY KEY`. - -!!! warning "Avertissement" - Vous ne devez pas décrire clé comme un attribut. - -### Touche Numérique {#ext_dict-numeric-key} - -Type: `UInt64`. - -Exemple de Configuration: - -``` xml - - Id - -``` - -Champs de Configuration: - -- `name` – The name of the column with keys. - -Pour DDL-requête: - -``` sql -CREATE DICTIONARY ( - Id UInt64, - ... -) -PRIMARY KEY Id -... -``` - -- `PRIMARY KEY` – The name of the column with keys. - -### Clé Composite {#composite-key} - -La clé peut être un `tuple` de tous les types de champs. Le [disposition](external-dicts-dict-layout.md) dans ce cas, doit être `complex_key_hashed` ou `complex_key_cache`. - -!!! tip "Conseil" - Une clé composite peut être constitué d'un seul élément. Cela permet d'utiliser une chaîne comme clé, par exemple. - -La structure de clé est définie dans l'élément ``. Les principaux champs sont spécifiés dans le même format que le dictionnaire [attribut](external-dicts-dict-structure.md). Exemple: - -``` xml - - - - field1 - String - - - field2 - UInt32 - - ... - -... -``` - -ou - -``` sql -CREATE DICTIONARY ( - field1 String, - field2 String - ... -) -PRIMARY KEY field1, field2 -... -``` - -Pour une requête à l' `dictGet*` fonction, un tuple est passé comme clé. Exemple: `dictGetString('dict_name', 'attr_name', tuple('string for field1', num_for_field2))`. - -## Attribut {#ext_dict_structure-attributes} - -Exemple de Configuration: - -``` xml - - ... - - Name - ClickHouseDataType - - rand64() - true - true - true - - -``` - -ou - -``` sql -CREATE DICTIONARY somename ( - Name ClickHouseDataType DEFAULT '' EXPRESSION rand64() HIERARCHICAL INJECTIVE IS_OBJECT_ID -) -``` - -Champs de Configuration: - -| Balise | Description | Requis | -|------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------| -| `name` | Nom de la colonne. | Oui | -| `type` | Type de données ClickHouse.
ClickHouse essaie de convertir la valeur du dictionnaire vers le type de données spécifié. Par exemple, pour MySQL, le champ peut être `TEXT`, `VARCHAR` ou `BLOB` dans la table source MySQL, mais il peut être chargé comme `String` dans ClickHouse.<br/>[Nullable](../../../sql-reference/data-types/nullable.md) n'est pas pris en charge. | Oui |
-| `null_value` | Valeur par défaut pour un élément inexistant.<br/>Dans l'exemple, il s'agit d'une chaîne vide. Vous ne pouvez pas utiliser `NULL` dans ce champ. | Oui |
-| `expression` | [Expression](../../syntax.md#syntax-expressions) que ClickHouse exécute sur la valeur.<br/>L'expression peut être un nom de colonne dans la base de données SQL distante ; vous pouvez donc l'utiliser pour créer un alias de la colonne distante.<br/><br/>Valeur par défaut : aucune expression. | Non |
-| `hierarchical` | Si `true`, l'attribut contient la valeur de la clé parente de la clé courante. Voir [Dictionnaires Hiérarchiques](external-dicts-dict-hierarchical.md).<br/><br/>Valeur par défaut : `false`. | Non |
-| `injective` | Indicateur précisant si l'application `id -> attribute` est [injective](https://en.wikipedia.org/wiki/Injective_function).<br/>Si `true`, ClickHouse peut déplacer automatiquement après la clause `GROUP BY` les appels aux dictionnaires injectifs, ce qui réduit généralement de façon significative le nombre de ces appels.<br/><br/>Valeur par défaut : `false`. | Non |
-| `is_object_id` | Indicateur précisant si la requête est exécutée pour un document MongoDB via `ObjectID`.<br/><br/>Valeur par défaut : `false`. | Non |
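-
-Pour illustrer l'intérêt de `injective`, esquisse sous hypothèses (le dictionnaire `countries`, la table `visits` et la colonne `country_id` sont hypothétiques) : lorsque l'attribut est marqué injectif, ClickHouse peut agréger par la clé et ne résoudre l'attribut qu'après l'agrégation :
-
-``` sql
--- Avec <injective>true</injective>, cette requête équivaut en interne à
--- GROUP BY country_id, dictGetString n'étant appelée qu'après l'agrégation.
-SELECT
-    dictGetString('countries', 'name', country_id) AS country,
-    count() AS c
-FROM visits
-GROUP BY country
-```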
Le chemin d'accès à la configuration spécifiée dans le [dictionaries_config](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_config) paramètre. - -Les dictionnaires peuvent être chargés au démarrage du serveur ou à la première utilisation, en fonction [dictionaries_lazy_load](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load) paramètre. - -Le [dictionnaire](../../../operations/system-tables.md#system_tables-dictionaries) la table système contient des informations sur les dictionnaires configurés sur le serveur. Pour chaque dictionnaire, vous pouvez y trouver: - -- Statut du dictionnaire. -- Paramètres de Configuration. -- Des métriques telles que la quantité de RAM allouée pour le dictionnaire ou un certain nombre de requêtes depuis que le dictionnaire a été chargé avec succès. - -Le fichier de configuration du dictionnaire a le format suivant: - -``` xml - - An optional element with any content. Ignored by the ClickHouse server. - - - /etc/metrika.xml - - - - - - - - -``` - -Vous pouvez [configurer](external-dicts-dict.md) le nombre de dictionnaires dans le même fichier. - -[Requêtes DDL pour les dictionnaires](../../statements/create.md#create-dictionary-query) ne nécessite aucun enregistrement supplémentaire dans la configuration du serveur. Ils permettent de travailler avec des dictionnaires en tant qu'entités de première classe, comme des tables ou des vues. - -!!! attention "Attention" - Vous pouvez convertir les valeurs pour un petit dictionnaire en le décrivant dans un `SELECT` requête (voir la [transformer](../../../sql-reference/functions/other-functions.md) fonction). Cette fonctionnalité n'est pas liée aux dictionnaires externes. - -## Voir Aussi {#ext-dicts-see-also} - -- [Configuration D'un dictionnaire externe](external-dicts-dict.md) -- [Stockage des dictionnaires en mémoire](external-dicts-dict-layout.md) -- [Mises À Jour Du Dictionnaire](external-dicts-dict-lifetime.md) -- [Sources de dictionnaires externes](external-dicts-dict-sources.md) -- [Clé et champs du dictionnaire](external-dicts-dict-structure.md) -- [Fonctions pour travailler avec des dictionnaires externes](../../../sql-reference/functions/ext-dict-functions.md) - -[Article Original](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts/) diff --git a/docs/fr/sql-reference/dictionaries/external-dictionaries/index.md b/docs/fr/sql-reference/dictionaries/external-dictionaries/index.md deleted file mode 100644 index 109220205dd..00000000000 --- a/docs/fr/sql-reference/dictionaries/external-dictionaries/index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_folder_title: Dictionnaires Externes -toc_priority: 37 ---- - - diff --git a/docs/fr/sql-reference/dictionaries/index.md b/docs/fr/sql-reference/dictionaries/index.md deleted file mode 100644 index 3ec31085cc5..00000000000 --- a/docs/fr/sql-reference/dictionaries/index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_folder_title: Dictionnaire -toc_priority: 35 -toc_title: Introduction ---- - -# Dictionnaire {#dictionaries} - -Un dictionnaire est une cartographie (`key -> attributes`) qui est pratique pour différents types de listes de référence. 
- -ClickHouse prend en charge des fonctions spéciales pour travailler avec des dictionnaires qui peuvent être utilisés dans les requêtes. Il est plus facile et plus efficace d'utiliser des dictionnaires avec des fonctions que par une `JOIN` avec des tableaux de référence. - -[NULL](../../sql-reference/syntax.md#null-literal) les valeurs ne peuvent pas être stockées dans un dictionnaire. - -Supports ClickHouse: - -- [Construit-dans les dictionnaires](internal-dicts.md#internal_dicts) avec un [ensemble de fonctions](../../sql-reference/functions/ym-dict-functions.md). -- [Plug-in (externe) dictionnaires](external-dictionaries/external-dicts.md#dicts-external-dicts) avec un [ensemble de fonctions](../../sql-reference/functions/ext-dict-functions.md). - -[Article Original](https://clickhouse.tech/docs/en/query_language/dicts/) diff --git a/docs/fr/sql-reference/dictionaries/internal-dicts.md b/docs/fr/sql-reference/dictionaries/internal-dicts.md deleted file mode 100644 index 607936031a1..00000000000 --- a/docs/fr/sql-reference/dictionaries/internal-dicts.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 39 -toc_title: Dictionnaires Internes ---- - -# Dictionnaires Internes {#internal_dicts} - -ClickHouse contient une fonction intégrée pour travailler avec une géobase. - -Cela vous permet de: - -- Utilisez L'ID d'une région pour obtenir son nom dans la langue souhaitée. -- Utilisez L'ID d'une région pour obtenir L'ID d'une ville, d'une région, d'un district fédéral, d'un pays ou d'un continent. -- Vérifiez si une région fait partie d'une autre région. -- Obtenez une chaîne de régions parentes. - -Toutes les fonctions prennent en charge “translocality,” la capacité d'utiliser simultanément différentes perspectives sur la propriété de la région. Pour plus d'informations, consultez la section “Functions for working with Yandex.Metrica dictionaries”. - -Les dictionnaires internes sont désactivés dans le package par défaut. -Pour les activer, décommentez les paramètres `path_to_regions_hierarchy_file` et `path_to_regions_names_files` dans le fichier de configuration du serveur. - -La géobase est chargée à partir de fichiers texte. - -Place de la `regions_hierarchy*.txt` les fichiers dans le `path_to_regions_hierarchy_file` répertoire. Ce paramètre de configuration doit contenir le chemin `regions_hierarchy.txt` fichier (la hiérarchie régionale par défaut), et les autres fichiers (`regions_hierarchy_ua.txt`) doit être situé dans le même répertoire. - -Mettre le `regions_names_*.txt` les fichiers dans le `path_to_regions_names_files` répertoire. - -Vous pouvez également créer ces fichiers vous-même. Le format de fichier est le suivant: - -`regions_hierarchy*.txt`: TabSeparated (pas d'en-tête), colonnes: - -- région de l'ID (`UInt32`) -- ID de région parent (`UInt32`) -- type de région (`UInt8`): 1-continent, 3-pays, 4-district fédéral, 5-région, 6-ville; les autres types n'ont pas de valeurs -- population (`UInt32`) — optional column - -`regions_names_*.txt`: TabSeparated (pas d'en-tête), colonnes: - -- région de l'ID (`UInt32`) -- nom de la région (`String`) — Can't contain tabs or line feeds, even escaped ones. - -Un tableau plat est utilisé pour stocker dans la RAM. Pour cette raison, les ID ne devraient pas dépasser un million. - -Les dictionnaires peuvent être mis à jour sans redémarrer le serveur. Cependant, l'ensemble des dictionnaires n'est pas mis à jour. 
-Pour les mises à jour, les temps de modification du fichier sont vérifiés. Si un fichier a été modifié, le dictionnaire est mis à jour. -L'intervalle de vérification des modifications est configuré dans le `builtin_dictionaries_reload_interval` paramètre. -Les mises à jour du dictionnaire (autres que le chargement lors de la première utilisation) ne bloquent pas les requêtes. Lors des mises à jour, les requêtes utilisent les anciennes versions des dictionnaires. Si une erreur se produit pendant une mise à jour, l'erreur est écrite dans le journal du serveur et les requêtes continuent d'utiliser l'ancienne version des dictionnaires. - -Nous vous recommandons de mettre à jour périodiquement les dictionnaires avec la géobase. Lors d'une mise à jour, générez de nouveaux fichiers et écrivez-les dans un emplacement séparé. Lorsque tout est prêt, renommez - les en fichiers utilisés par le serveur. - -Il existe également des fonctions pour travailler avec les identifiants du système d'exploitation et Yandex.Moteurs de recherche Metrica, mais ils ne devraient pas être utilisés. - -[Article Original](https://clickhouse.tech/docs/en/query_language/dicts/internal_dicts/) diff --git a/docs/fr/sql-reference/functions/arithmetic-functions.md b/docs/fr/sql-reference/functions/arithmetic-functions.md deleted file mode 100644 index c35fb104236..00000000000 --- a/docs/fr/sql-reference/functions/arithmetic-functions.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 35 -toc_title: "Arithm\xE9tique" ---- - -# Fonctions Arithmétiques {#arithmetic-functions} - -Pour toutes les fonctions arithmétiques, le type de résultat est calculé comme le plus petit type de nombre dans lequel le résultat correspond, s'il existe un tel type. Le minimum est pris simultanément sur la base du nombre de bits, s'il est signé, et s'il flotte. S'il n'y a pas assez de bits, le type de bits le plus élevé est pris. - -Exemple: - -``` sql -SELECT toTypeName(0), toTypeName(0 + 0), toTypeName(0 + 0 + 0), toTypeName(0 + 0 + 0 + 0) -``` - -``` text -┌─toTypeName(0)─┬─toTypeName(plus(0, 0))─┬─toTypeName(plus(plus(0, 0), 0))─┬─toTypeName(plus(plus(plus(0, 0), 0), 0))─┐ -│ UInt8 │ UInt16 │ UInt32 │ UInt64 │ -└───────────────┴────────────────────────┴─────────────────────────────────┴──────────────────────────────────────────┘ -``` - -Les fonctions arithmétiques fonctionnent pour n'importe quelle paire de types de UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32 ou Float64. - -Le débordement est produit de la même manière qu'en C++. - -## plus (A, B), opérateur a + b {#plusa-b-a-b-operator} - -Calcule la somme des nombres. -Vous pouvez également ajouter des nombres entiers avec une date ou la date et l'heure. Dans le cas d'une date, Ajouter un entier signifie ajouter le nombre de jours correspondant. Pour une date avec l'heure, cela signifie ajouter le nombre de secondes correspondant. - -## moins (A, B), opérateur a - b {#minusa-b-a-b-operator} - -Calcule la différence. Le résultat est toujours signé. - -You can also calculate integer numbers from a date or date with time. The idea is the same – see above for ‘plus’. - -## la multiplication(a, b), a \* et b \* de l'opérateur {#multiplya-b-a-b-operator} - -Calcule le produit des nombres. - -## diviser (A, B), opérateur a / b {#dividea-b-a-b-operator} - -Calcule le quotient des nombres. Le type de résultat est toujours un type à virgule flottante. -Il n'est pas de division entière. 
Pour la division entière, utilisez le ‘intDiv’ fonction. -En divisant par zéro vous obtenez ‘inf’, ‘-inf’, ou ‘nan’. - -## intDiv (a, b) {#intdiva-b} - -Calcule le quotient des nombres. Divise en entiers, arrondi vers le bas (par la valeur absolue). -Une exception est levée en divisant par zéro ou en divisant un nombre négatif minimal par moins un. - -## intDivOrZero(a, b) {#intdivorzeroa-b} - -Diffère de ‘intDiv’ en ce sens qu'il renvoie zéro en divisant par zéro ou en divisant un nombre négatif minimal par moins un. - -## opérateur modulo(A, B), A % B {#moduloa-b-a-b-operator} - -Calcule le reste après la division. -Si les arguments sont des nombres à virgule flottante, ils sont pré-convertis en entiers en supprimant la partie décimale. -Le reste est pris dans le même sens qu'en C++. La division tronquée est utilisée pour les nombres négatifs. -Une exception est levée en divisant par zéro ou en divisant un nombre négatif minimal par moins un. - -## moduloOrZero (a, b) {#moduloorzeroa-b} - -Diffère de ‘modulo’ en ce sens qu'il renvoie zéro lorsque le diviseur est nul. - -## annuler (a), - un opérateur {#negatea-a-operator} - -Calcule un nombre avec le signe inverse. Le résultat est toujours signé. - -## abs(un) {#arithm_func-abs} - -Calcule la valeur absolue d'un nombre (un). Autrement dit, si un \< 0, Il renvoie-A. pour les types non signés, il ne fait rien. Pour les types entiers signés, il renvoie un nombre non signé. - -## pgcd(a, b) {#gcda-b} - -Renvoie le plus grand diviseur commun des nombres. -Une exception est levée en divisant par zéro ou en divisant un nombre négatif minimal par moins un. - -## ppcm(a, b) {#lcma-b} - -Renvoie le multiple le moins commun des nombres. -Une exception est levée en divisant par zéro ou en divisant un nombre négatif minimal par moins un. - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/arithmetic_functions/) diff --git a/docs/fr/sql-reference/functions/array-functions.md b/docs/fr/sql-reference/functions/array-functions.md deleted file mode 100644 index 40568841372..00000000000 --- a/docs/fr/sql-reference/functions/array-functions.md +++ /dev/null @@ -1,1061 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 46 -toc_title: Travailler avec des tableaux ---- - -# Fonctions pour travailler avec des tableaux {#functions-for-working-with-arrays} - -## vide {#function-empty} - -Retourne 1 pour un tableau vide, ou 0 pour un non-vide. -Le type de résultat est UInt8. -La fonction fonctionne également pour les chaînes. - -## notEmpty {#function-notempty} - -Retourne 0 pour un tableau vide, ou 1 pour un non-vide. -Le type de résultat est UInt8. -La fonction fonctionne également pour les chaînes. - -## longueur {#array_functions-length} - -Retourne le nombre d'éléments dans le tableau. -Le type de résultat est UInt64. -La fonction fonctionne également pour les chaînes. - -## emptyArrayUInt8, emptyArrayUInt16, emptyArrayUInt32, emptyArrayUInt64 {#emptyarrayuint8-emptyarrayuint16-emptyarrayuint32-emptyarrayuint64} - -## emptyArrayInt8, emptyArrayInt16, emptyArrayInt32, emptyArrayInt64 {#emptyarrayint8-emptyarrayint16-emptyarrayint32-emptyarrayint64} - -## emptyArrayFloat32, emptyArrayFloat64 {#emptyarrayfloat32-emptyarrayfloat64} - -## emptyArrayDate, emptyArrayDateTime {#emptyarraydate-emptyarraydatetime} - -## emptyArrayString {#emptyarraystring} - -Accepte zéro argument et renvoie un tableau vide du type approprié. 
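A short sketch showing that each of these constructors returns an empty array of the corresponding type:

``` sql
SELECT toTypeName(emptyArrayUInt8()) AS a, toTypeName(emptyArrayString()) AS b
```

``` text
┌─a────────────┬─b─────────────┐
│ Array(UInt8) │ Array(String) │
└──────────────┴───────────────┘
```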
- -## emptyArrayToSingle {#emptyarraytosingle} - -Accepte un tableau vide et renvoie un élément de tableau qui est égal à la valeur par défaut. - -## plage (fin), Plage(début, fin \[, étape\]) {#rangeend-rangestart-end-step} - -Retourne un tableau de nombres du début à la fin-1 par étape. -Si l'argument `start` n'est pas spécifié, la valeur par défaut est 0. -Si l'argument `step` n'est pas spécifié, la valeur par défaut est 1. -Il se comporte presque comme pythonic `range`. Mais la différence est que tous les types d'arguments doivent être `UInt` nombre. -Juste au cas où, une exception est levée si des tableaux d'une longueur totale de plus de 100 000 000 d'éléments sont créés dans un bloc de données. - -## array(x1, …), operator \[x1, …\] {#arrayx1-operator-x1} - -Crée un tableau à partir des arguments de la fonction. -Les arguments doivent être des constantes et avoir des types qui ont le plus petit type commun. Au moins un argument doit être passé, sinon il n'est pas clair quel type de tableau créer. Qui est, vous ne pouvez pas utiliser cette fonction pour créer un tableau vide (pour ce faire, utilisez la ‘emptyArray\*’ la fonction décrite ci-dessus). -Retourne un ‘Array(T)’ type de résultat, où ‘T’ est le plus petit type commun parmi les arguments passés. - -## arrayConcat {#arrayconcat} - -Combine des tableaux passés comme arguments. - -``` sql -arrayConcat(arrays) -``` - -**Paramètre** - -- `arrays` – Arbitrary number of arguments of [Tableau](../../sql-reference/data-types/array.md) type. - **Exemple** - - - -``` sql -SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res -``` - -``` text -┌─res───────────┐ -│ [1,2,3,4,5,6] │ -└───────────────┘ -``` - -## arrayElement(arr, n), opérateur arr\[n\] {#arrayelementarr-n-operator-arrn} - -Récupérer l'élément avec l'index `n` à partir du tableau `arr`. `n` doit être n'importe quel type entier. -Les index dans un tableau commencent à partir d'un. -Les index négatifs sont pris en charge. Dans ce cas, il sélectionne l'élément correspondant numérotées à partir de la fin. Exemple, `arr[-1]` est le dernier élément du tableau. - -Si l'index est en dehors des limites d'un tableau, il renvoie une valeur (0 pour les nombres, une chaîne vide pour les cordes, etc.), sauf pour le cas avec un tableau non constant et un index constant 0 (dans ce cas, il y aura une erreur `Array indices are 1-based`). - -## a (arr, elem) {#hasarr-elem} - -Vérifie si le ‘arr’ tableau a la ‘elem’ élément. -Retourne 0 si l'élément n'est pas dans le tableau, ou 1 si elle l'est. - -`NULL` est traitée comme une valeur. - -``` sql -SELECT has([1, 2, NULL], NULL) -``` - -``` text -┌─has([1, 2, NULL], NULL)─┐ -│ 1 │ -└─────────────────────────┘ -``` - -## hasAll {#hasall} - -Vérifie si un tableau est un sous-ensemble de l'autre. - -``` sql -hasAll(set, subset) -``` - -**Paramètre** - -- `set` – Array of any type with a set of elements. -- `subset` – Array of any type with elements that should be tested to be a subset of `set`. - -**Les valeurs de retour** - -- `1`, si `set` contient tous les éléments de `subset`. -- `0`, autrement. - -**Propriétés particulières** - -- Un tableau vide est un sous-ensemble d'un tableau quelconque. -- `Null` traitée comme une valeur. -- Ordre des valeurs dans les deux tableaux n'a pas d'importance. - -**Exemple** - -`SELECT hasAll([], [])` retours 1. - -`SELECT hasAll([1, Null], [Null])` retours 1. - -`SELECT hasAll([1.0, 2, 3, 4], [1, 3])` retours 1. - -`SELECT hasAll(['a', 'b'], ['a'])` retours 1. - -`SELECT hasAll([1], ['a'])` renvoie 0. 
- -`SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [3, 5]])` renvoie 0. - -## hasAny {#hasany} - -Vérifie si deux tableaux ont une intersection par certains éléments. - -``` sql -hasAny(array1, array2) -``` - -**Paramètre** - -- `array1` – Array of any type with a set of elements. -- `array2` – Array of any type with a set of elements. - -**Les valeurs de retour** - -- `1`, si `array1` et `array2` avoir un élément similaire au moins. -- `0`, autrement. - -**Propriétés particulières** - -- `Null` traitée comme une valeur. -- Ordre des valeurs dans les deux tableaux n'a pas d'importance. - -**Exemple** - -`SELECT hasAny([1], [])` retourner `0`. - -`SELECT hasAny([Null], [Null, 1])` retourner `1`. - -`SELECT hasAny([-128, 1., 512], [1])` retourner `1`. - -`SELECT hasAny([[1, 2], [3, 4]], ['a', 'c'])` retourner `0`. - -`SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [1, 2]])` retourner `1`. - -## indexOf (arr, x) {#indexofarr-x} - -Renvoie l'index de la première ‘x’ élément (à partir de 1) s'il est dans le tableau, ou 0 s'il ne l'est pas. - -Exemple: - -``` sql -SELECT indexOf([1, 3, NULL, NULL], NULL) -``` - -``` text -┌─indexOf([1, 3, NULL, NULL], NULL)─┐ -│ 3 │ -└───────────────────────────────────┘ -``` - -Ensemble d'éléments de `NULL` sont traités comme des valeurs normales. - -## countEqual (arr, x) {#countequalarr-x} - -Renvoie le nombre d'éléments dans le tableau égal à X. équivalent à arrayCount (elem - \> elem = x, arr). - -`NULL` les éléments sont traités comme des valeurs distinctes. - -Exemple: - -``` sql -SELECT countEqual([1, 2, NULL, NULL], NULL) -``` - -``` text -┌─countEqual([1, 2, NULL, NULL], NULL)─┐ -│ 2 │ -└──────────────────────────────────────┘ -``` - -## arrayEnumerate (arr) {#array_functions-arrayenumerate} - -Returns the array \[1, 2, 3, …, length (arr) \] - -Cette fonction est normalement utilisée avec ARRAY JOIN. Il permet de compter quelque chose une seule fois pour chaque tableau après l'application de la jointure de tableau. Exemple: - -``` sql -SELECT - count() AS Reaches, - countIf(num = 1) AS Hits -FROM test.hits -ARRAY JOIN - GoalsReached, - arrayEnumerate(GoalsReached) AS num -WHERE CounterID = 160656 -LIMIT 10 -``` - -``` text -┌─Reaches─┬──Hits─┐ -│ 95606 │ 31406 │ -└─────────┴───────┘ -``` - -Dans cet exemple, Reaches est le nombre de conversions (les chaînes reçues après l'application de la jointure de tableau), et Hits est le nombre de pages vues (chaînes avant la jointure de tableau). Dans ce cas particulier, vous pouvez obtenir le même résultat dans une voie plus facile: - -``` sql -SELECT - sum(length(GoalsReached)) AS Reaches, - count() AS Hits -FROM test.hits -WHERE (CounterID = 160656) AND notEmpty(GoalsReached) -``` - -``` text -┌─Reaches─┬──Hits─┐ -│ 95606 │ 31406 │ -└─────────┴───────┘ -``` - -Cette fonction peut également être utilisée dans les fonctions d'ordre supérieur. Par exemple, vous pouvez l'utiliser pour obtenir les indices de tableau pour les éléments qui correspondent à une condition. - -## arrayEnumerateUniq(arr, …) {#arrayenumerateuniqarr} - -Renvoie un tableau de la même taille que le tableau source, indiquant pour chaque élément Quelle est sa position parmi les éléments de même valeur. -Par exemple: arrayEnumerateUniq(\[10, 20, 10, 30\]) = \[1, 1, 2, 1\]. - -Cette fonction est utile lors de L'utilisation de la jointure de tableau et de l'agrégation d'éléments de tableau. 
-Exemple: - -``` sql -SELECT - Goals.ID AS GoalID, - sum(Sign) AS Reaches, - sumIf(Sign, num = 1) AS Visits -FROM test.visits -ARRAY JOIN - Goals, - arrayEnumerateUniq(Goals.ID) AS num -WHERE CounterID = 160656 -GROUP BY GoalID -ORDER BY Reaches DESC -LIMIT 10 -``` - -``` text -┌──GoalID─┬─Reaches─┬─Visits─┐ -│ 53225 │ 3214 │ 1097 │ -│ 2825062 │ 3188 │ 1097 │ -│ 56600 │ 2803 │ 488 │ -│ 1989037 │ 2401 │ 365 │ -│ 2830064 │ 2396 │ 910 │ -│ 1113562 │ 2372 │ 373 │ -│ 3270895 │ 2262 │ 812 │ -│ 1084657 │ 2262 │ 345 │ -│ 56599 │ 2260 │ 799 │ -│ 3271094 │ 2256 │ 812 │ -└─────────┴─────────┴────────┘ -``` - -Dans cet exemple, chaque ID d'objectif a un calcul du nombre de conversions (chaque élément de la structure de données imbriquées objectifs est un objectif atteint, que nous appelons une conversion) et le nombre de sessions. Sans array JOIN, nous aurions compté le nombre de sessions comme sum(signe). Mais dans ce cas particulier, les lignes ont été multipliées par la structure des objectifs imbriqués, donc pour compter chaque session une fois après cela, nous appliquons une condition à la valeur de arrayEnumerateUniq(Goals.ID) fonction. - -La fonction arrayEnumerateUniq peut prendre plusieurs tableaux de la même taille que les arguments. Dans ce cas, l'unicité est considérée pour les tuples d'éléments dans les mêmes positions dans tous les tableaux. - -``` sql -SELECT arrayEnumerateUniq([1, 1, 1, 2, 2, 2], [1, 1, 2, 1, 1, 2]) AS res -``` - -``` text -┌─res───────────┐ -│ [1,2,1,1,2,1] │ -└───────────────┘ -``` - -Ceci est nécessaire lors de L'utilisation de Array JOIN avec une structure de données imbriquée et une agrégation supplémentaire entre plusieurs éléments de cette structure. - -## arrayPopBack {#arraypopback} - -Supprime le dernier élément du tableau. - -``` sql -arrayPopBack(array) -``` - -**Paramètre** - -- `array` – Array. - -**Exemple** - -``` sql -SELECT arrayPopBack([1, 2, 3]) AS res -``` - -``` text -┌─res───┐ -│ [1,2] │ -└───────┘ -``` - -## arrayPopFront {#arraypopfront} - -Supprime le premier élément de la matrice. - -``` sql -arrayPopFront(array) -``` - -**Paramètre** - -- `array` – Array. - -**Exemple** - -``` sql -SELECT arrayPopFront([1, 2, 3]) AS res -``` - -``` text -┌─res───┐ -│ [2,3] │ -└───────┘ -``` - -## arrayPushBack {#arraypushback} - -Ajoute un élément à la fin du tableau. - -``` sql -arrayPushBack(array, single_value) -``` - -**Paramètre** - -- `array` – Array. -- `single_value` – A single value. Only numbers can be added to an array with numbers, and only strings can be added to an array of strings. When adding numbers, ClickHouse automatically sets the `single_value` type pour le type de données du tableau. Pour plus d'informations sur les types de données dans ClickHouse, voir “[Types de données](../../sql-reference/data-types/index.md#data_types)”. Peut être `NULL`. La fonction ajoute un `NULL` tableau, et le type d'éléments de tableau convertit en `Nullable`. - -**Exemple** - -``` sql -SELECT arrayPushBack(['a'], 'b') AS res -``` - -``` text -┌─res───────┐ -│ ['a','b'] │ -└───────────┘ -``` - -## arrayPushFront {#arraypushfront} - -Ajoute un élément au début du tableau. - -``` sql -arrayPushFront(array, single_value) -``` - -**Paramètre** - -- `array` – Array. -- `single_value` – A single value. Only numbers can be added to an array with numbers, and only strings can be added to an array of strings. When adding numbers, ClickHouse automatically sets the `single_value` type pour le type de données du tableau. 
Pour plus d'informations sur les types de données dans ClickHouse, voir “[Types de données](../../sql-reference/data-types/index.md#data_types)”. Peut être `NULL`. La fonction ajoute un `NULL` tableau, et le type d'éléments de tableau convertit en `Nullable`. - -**Exemple** - -``` sql -SELECT arrayPushFront(['b'], 'a') AS res -``` - -``` text -┌─res───────┐ -│ ['a','b'] │ -└───────────┘ -``` - -## arrayResize {#arrayresize} - -Les changements de la longueur du tableau. - -``` sql -arrayResize(array, size[, extender]) -``` - -**Paramètre:** - -- `array` — Array. -- `size` — Required length of the array. - - Si `size` est inférieure à la taille d'origine du tableau, le tableau est tronqué à partir de la droite. -- Si `size` est plus grande que la taille initiale du tableau, le tableau est étendu vers la droite avec `extender` valeurs ou valeurs par défaut pour le type de données des éléments du tableau. -- `extender` — Value for extending an array. Can be `NULL`. - -**Valeur renvoyée:** - -Un tableau de longueur `size`. - -**Exemples d'appels** - -``` sql -SELECT arrayResize([1], 3) -``` - -``` text -┌─arrayResize([1], 3)─┐ -│ [1,0,0] │ -└─────────────────────┘ -``` - -``` sql -SELECT arrayResize([1], 3, NULL) -``` - -``` text -┌─arrayResize([1], 3, NULL)─┐ -│ [1,NULL,NULL] │ -└───────────────────────────┘ -``` - -## arraySlice {#arrayslice} - -Retourne une tranche du tableau. - -``` sql -arraySlice(array, offset[, length]) -``` - -**Paramètre** - -- `array` – Array of data. -- `offset` – Indent from the edge of the array. A positive value indicates an offset on the left, and a negative value is an indent on the right. Numbering of the array items begins with 1. -- `length` - La longueur de la nécessaire tranche. Si vous spécifiez une valeur négative, la fonction renvoie un ouvert tranche `[offset, array_length - length)`. Si vous omettez la valeur, la fonction renvoie la tranche `[offset, the_end_of_array]`. - -**Exemple** - -``` sql -SELECT arraySlice([1, 2, NULL, 4, 5], 2, 3) AS res -``` - -``` text -┌─res────────┐ -│ [2,NULL,4] │ -└────────────┘ -``` - -Éléments de tableau définis sur `NULL` sont traités comme des valeurs normales. - -## arraySort(\[func,\] arr, …) {#array_functions-sort} - -Trie les éléments de la `arr` tableau dans l'ordre croissant. Si l' `func` fonction est spécifiée, l'ordre de tri est déterminé par le résultat de la `func` fonction appliquée aux éléments du tableau. Si `func` accepte plusieurs arguments, le `arraySort` la fonction est passé plusieurs tableaux que les arguments de `func` correspond à. Des exemples détaillés sont présentés à la fin de `arraySort` Description. - -Exemple de tri de valeurs entières: - -``` sql -SELECT arraySort([1, 3, 3, 0]); -``` - -``` text -┌─arraySort([1, 3, 3, 0])─┐ -│ [0,1,3,3] │ -└─────────────────────────┘ -``` - -Exemple de tri des valeurs de chaîne: - -``` sql -SELECT arraySort(['hello', 'world', '!']); -``` - -``` text -┌─arraySort(['hello', 'world', '!'])─┐ -│ ['!','hello','world'] │ -└────────────────────────────────────┘ -``` - -Considérez l'ordre de tri suivant pour le `NULL`, `NaN` et `Inf` valeur: - -``` sql -SELECT arraySort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf]); -``` - -``` text -┌─arraySort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf])─┐ -│ [-inf,-4,1,2,3,inf,nan,nan,NULL,NULL] │ -└───────────────────────────────────────────────────────────┘ -``` - -- `-Inf` les valeurs sont d'abord dans le tableau. -- `NULL` les valeurs sont les derniers dans le tableau. -- `NaN` les valeurs sont juste avant `NULL`. 
-- `Inf` les valeurs sont juste avant `NaN`. - -Notez que `arraySort` est un [fonction d'ordre supérieur](higher-order-functions.md). Vous pouvez passer d'une fonction lambda comme premier argument. Dans ce cas, l'ordre de classement est déterminé par le résultat de la fonction lambda appliquée aux éléments de la matrice. - -Considérons l'exemple suivant: - -``` sql -SELECT arraySort((x) -> -x, [1, 2, 3]) as res; -``` - -``` text -┌─res─────┐ -│ [3,2,1] │ -└─────────┘ -``` - -For each element of the source array, the lambda function returns the sorting key, that is, \[1 –\> -1, 2 –\> -2, 3 –\> -3\]. Since the `arraySort` fonction trie les touches dans l'ordre croissant, le résultat est \[3, 2, 1\]. Ainsi, l' `(x) –> -x` fonction lambda définit le [l'ordre décroissant](#array_functions-reverse-sort) dans un tri. - -La fonction lambda peut accepter plusieurs arguments. Dans ce cas, vous avez besoin de passer l' `arraySort` fonction plusieurs tableaux de longueur identique à laquelle correspondront les arguments de la fonction lambda. Le tableau résultant sera composé d'éléments du premier tableau d'entrée; les éléments du(des) Tableau (s) d'entrée suivant (s) spécifient les clés de tri. Exemple: - -``` sql -SELECT arraySort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; -``` - -``` text -┌─res────────────────┐ -│ ['world', 'hello'] │ -└────────────────────┘ -``` - -Ici, les éléments qui sont passés dans le deuxième tableau (\[2, 1\]) définissent une clé de tri pour l'élément correspondant à partir du tableau source (\[‘hello’, ‘world’\]), qui est, \[‘hello’ –\> 2, ‘world’ –\> 1\]. Since the lambda function doesn't use `x`, les valeurs réelles du tableau source n'affectent pas l'ordre dans le résultat. Si, ‘hello’ sera le deuxième élément du résultat, et ‘world’ sera le premier. - -D'autres exemples sont présentés ci-dessous. - -``` sql -SELECT arraySort((x, y) -> y, [0, 1, 2], ['c', 'b', 'a']) as res; -``` - -``` text -┌─res─────┐ -│ [2,1,0] │ -└─────────┘ -``` - -``` sql -SELECT arraySort((x, y) -> -y, [0, 1, 2], [1, 2, 3]) as res; -``` - -``` text -┌─res─────┐ -│ [2,1,0] │ -└─────────┘ -``` - -!!! note "Note" - Pour améliorer l'efficacité du tri, de la [Transformation schwartzienne](https://en.wikipedia.org/wiki/Schwartzian_transform) est utilisée. - -## arrayReverseSort(\[func,\] arr, …) {#array_functions-reverse-sort} - -Trie les éléments de la `arr` tableau dans l'ordre décroissant. Si l' `func` la fonction est spécifiée, `arr` est trié en fonction du résultat de la `func` fonction appliquée aux éléments du tableau, puis le tableau trié est inversé. Si `func` accepte plusieurs arguments, le `arrayReverseSort` la fonction est passé plusieurs tableaux que les arguments de `func` correspond à. Des exemples détaillés sont présentés à la fin de `arrayReverseSort` Description. 
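Conceptually (a sketch of the observable behaviour, not of the internal implementation), sorting and then reversing gives the same result:

``` sql
SELECT arrayReverseSort([1, 3, 2]) AS a, reverse(arraySort([1, 3, 2])) AS b
```

``` text
┌─a───────┬─b───────┐
│ [3,2,1] │ [3,2,1] │
└─────────┴─────────┘
```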
- -Exemple de tri de valeurs entières: - -``` sql -SELECT arrayReverseSort([1, 3, 3, 0]); -``` - -``` text -┌─arrayReverseSort([1, 3, 3, 0])─┐ -│ [3,3,1,0] │ -└────────────────────────────────┘ -``` - -Exemple de tri des valeurs de chaîne: - -``` sql -SELECT arrayReverseSort(['hello', 'world', '!']); -``` - -``` text -┌─arrayReverseSort(['hello', 'world', '!'])─┐ -│ ['world','hello','!'] │ -└───────────────────────────────────────────┘ -``` - -Considérez l'ordre de tri suivant pour le `NULL`, `NaN` et `Inf` valeur: - -``` sql -SELECT arrayReverseSort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf]) as res; -``` - -``` text -┌─res───────────────────────────────────┐ -│ [inf,3,2,1,-4,-inf,nan,nan,NULL,NULL] │ -└───────────────────────────────────────┘ -``` - -- `Inf` les valeurs sont d'abord dans le tableau. -- `NULL` les valeurs sont les derniers dans le tableau. -- `NaN` les valeurs sont juste avant `NULL`. -- `-Inf` les valeurs sont juste avant `NaN`. - -Notez que l' `arrayReverseSort` est un [fonction d'ordre supérieur](higher-order-functions.md). Vous pouvez passer d'une fonction lambda comme premier argument. Exemple est montré ci-dessous. - -``` sql -SELECT arrayReverseSort((x) -> -x, [1, 2, 3]) as res; -``` - -``` text -┌─res─────┐ -│ [1,2,3] │ -└─────────┘ -``` - -Le tableau est trié de la façon suivante: - -1. Dans un premier temps, le tableau source (\[1, 2, 3\]) est trié en fonction du résultat de la fonction lambda appliquée aux éléments du tableau. Le résultat est un tableau \[3, 2, 1\]. -2. Tableau qui est obtenu à l'étape précédente, est renversé. Donc, le résultat final est \[1, 2, 3\]. - -La fonction lambda peut accepter plusieurs arguments. Dans ce cas, vous avez besoin de passer l' `arrayReverseSort` fonction plusieurs tableaux de longueur identique à laquelle correspondront les arguments de la fonction lambda. Le tableau résultant sera composé d'éléments du premier tableau d'entrée; les éléments du(des) Tableau (s) d'entrée suivant (s) spécifient les clés de tri. Exemple: - -``` sql -SELECT arrayReverseSort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; -``` - -``` text -┌─res───────────────┐ -│ ['hello','world'] │ -└───────────────────┘ -``` - -Dans cet exemple, le tableau est trié de la façon suivante: - -1. Au début, le tableau source (\[‘hello’, ‘world’\]) est triée selon le résultat de la fonction lambda appliquée aux éléments de tableaux. Les éléments qui sont passés dans le deuxième tableau (\[2, 1\]), définissent les clés de tri pour les éléments correspondants du tableau source. Le résultat est un tableau \[‘world’, ‘hello’\]. -2. Tableau trié lors de l'étape précédente, est renversé. Donc, le résultat final est \[‘hello’, ‘world’\]. - -D'autres exemples sont présentés ci-dessous. - -``` sql -SELECT arrayReverseSort((x, y) -> y, [4, 3, 5], ['a', 'b', 'c']) AS res; -``` - -``` text -┌─res─────┐ -│ [5,3,4] │ -└─────────┘ -``` - -``` sql -SELECT arrayReverseSort((x, y) -> -y, [4, 3, 5], [1, 2, 3]) AS res; -``` - -``` text -┌─res─────┐ -│ [4,3,5] │ -└─────────┘ -``` - -## arrayUniq(arr, …) {#arrayuniqarr} - -Si un argument est passé, il compte le nombre de différents éléments dans le tableau. -Si plusieurs arguments sont passés, il compte le nombre de tuples différents d'éléments aux positions correspondantes dans plusieurs tableaux. - -Si vous souhaitez obtenir une liste des éléments dans un tableau, vous pouvez utiliser arrayReduce(‘groupUniqArray’, arr). - -## arrayJoin (arr) {#array-functions-join} - -Une fonction spéciale. 
Voir la section [“ArrayJoin function”](array-join.md#functions_arrayjoin). - -## tableaudifférence {#arraydifference} - -Calcule la différence entre les éléments de tableau adjacents. Renvoie un tableau où le premier élément sera 0, le second est la différence entre `a[1] - a[0]`, etc. The type of elements in the resulting array is determined by the type inference rules for subtraction (e.g. `UInt8` - `UInt8` = `Int16`). - -**Syntaxe** - -``` sql -arrayDifference(array) -``` - -**Paramètre** - -- `array` – [Tableau](https://clickhouse.tech/docs/en/data_types/array/). - -**Valeurs renvoyées** - -Renvoie un tableau de différences entre les éléments adjacents. - -Type: [UInt\*](https://clickhouse.tech/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.tech/docs/en/data_types/int_uint/#int-ranges), [Flottant\*](https://clickhouse.tech/docs/en/data_types/float/). - -**Exemple** - -Requête: - -``` sql -SELECT arrayDifference([1, 2, 3, 4]) -``` - -Résultat: - -``` text -┌─arrayDifference([1, 2, 3, 4])─┐ -│ [0,1,1,1] │ -└───────────────────────────────┘ -``` - -Exemple de débordement dû au type de résultat Int64: - -Requête: - -``` sql -SELECT arrayDifference([0, 10000000000000000000]) -``` - -Résultat: - -``` text -┌─arrayDifference([0, 10000000000000000000])─┐ -│ [0,-8446744073709551616] │ -└────────────────────────────────────────────┘ -``` - -## arrayDistinct {#arraydistinct} - -Prend un tableau, retourne un tableau contenant les différents éléments seulement. - -**Syntaxe** - -``` sql -arrayDistinct(array) -``` - -**Paramètre** - -- `array` – [Tableau](https://clickhouse.tech/docs/en/data_types/array/). - -**Valeurs renvoyées** - -Retourne un tableau contenant les éléments distincts. - -**Exemple** - -Requête: - -``` sql -SELECT arrayDistinct([1, 2, 2, 3, 1]) -``` - -Résultat: - -``` text -┌─arrayDistinct([1, 2, 2, 3, 1])─┐ -│ [1,2,3] │ -└────────────────────────────────┘ -``` - -## arrayEnumerateDense(arr) {#array_functions-arrayenumeratedense} - -Renvoie un tableau de la même taille que le tableau source, indiquant où chaque élément apparaît en premier dans le tableau source. - -Exemple: - -``` sql -SELECT arrayEnumerateDense([10, 20, 10, 30]) -``` - -``` text -┌─arrayEnumerateDense([10, 20, 10, 30])─┐ -│ [1,2,1,3] │ -└───────────────────────────────────────┘ -``` - -## arrayIntersect (arr) {#array-functions-arrayintersect} - -Prend plusieurs tableaux, retourne un tableau avec des éléments présents dans tous les tableaux source. L'ordre des éléments dans le tableau résultant est le même que dans le premier tableau. - -Exemple: - -``` sql -SELECT - arrayIntersect([1, 2], [1, 3], [2, 3]) AS no_intersect, - arrayIntersect([1, 2], [1, 3], [1, 4]) AS intersect -``` - -``` text -┌─no_intersect─┬─intersect─┐ -│ [] │ [1] │ -└──────────────┴───────────┘ -``` - -## arrayReduce {#arrayreduce} - -Applique une fonction d'agrégation aux éléments du tableau et renvoie son résultat. Le nom de la fonction d'agrégation est passé sous forme de chaîne entre guillemets simples `'max'`, `'sum'`. Lorsque vous utilisez des fonctions d'agrégat paramétriques, le paramètre est indiqué après le nom de la fonction entre parenthèses `'uniqUpTo(6)'`. - -**Syntaxe** - -``` sql -arrayReduce(agg_func, arr1, arr2, ..., arrN) -``` - -**Paramètre** - -- `agg_func` — The name of an aggregate function which should be a constant [chaîne](../../sql-reference/data-types/string.md). 
-- `arr` — Any number of [tableau](../../sql-reference/data-types/array.md) tapez les colonnes comme paramètres de la fonction d'agrégation. - -**Valeur renvoyée** - -**Exemple** - -``` sql -SELECT arrayReduce('max', [1, 2, 3]) -``` - -``` text -┌─arrayReduce('max', [1, 2, 3])─┐ -│ 3 │ -└───────────────────────────────┘ -``` - -Si une fonction d'agrégation prend plusieurs arguments, cette fonction doit être appliqué à plusieurs ensembles de même taille. - -``` sql -SELECT arrayReduce('maxIf', [3, 5], [1, 0]) -``` - -``` text -┌─arrayReduce('maxIf', [3, 5], [1, 0])─┐ -│ 3 │ -└──────────────────────────────────────┘ -``` - -Exemple avec une fonction d'agrégat paramétrique: - -``` sql -SELECT arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) -``` - -``` text -┌─arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])─┐ -│ 4 │ -└─────────────────────────────────────────────────────────────┘ -``` - -## arrayReduceInRanges {#arrayreduceinranges} - -Applique une fonction d'agrégation d'éléments de tableau dans des plages et retourne un tableau contenant le résultat correspondant à chaque gamme. La fonction retourne le même résultat que plusieurs `arrayReduce(agg_func, arraySlice(arr1, index, length), ...)`. - -**Syntaxe** - -``` sql -arrayReduceInRanges(agg_func, ranges, arr1, arr2, ..., arrN) -``` - -**Paramètre** - -- `agg_func` — The name of an aggregate function which should be a constant [chaîne](../../sql-reference/data-types/string.md). -- `ranges` — The ranges to aggretate which should be an [tableau](../../sql-reference/data-types/array.md) de [tuple](../../sql-reference/data-types/tuple.md) qui contient l'indice et la longueur de chaque plage. -- `arr` — Any number of [tableau](../../sql-reference/data-types/array.md) tapez les colonnes comme paramètres de la fonction d'agrégation. - -**Valeur renvoyée** - -**Exemple** - -``` sql -SELECT arrayReduceInRanges( - 'sum', - [(1, 5), (2, 3), (3, 4), (4, 4)], - [1000000, 200000, 30000, 4000, 500, 60, 7] -) AS res -``` - -``` text -┌─res─────────────────────────┐ -│ [1234500,234000,34560,4567] │ -└─────────────────────────────┘ -``` - -## arrayReverse(arr) {#arrayreverse} - -Retourne un tableau de la même taille que l'original tableau contenant les éléments dans l'ordre inverse. - -Exemple: - -``` sql -SELECT arrayReverse([1, 2, 3]) -``` - -``` text -┌─arrayReverse([1, 2, 3])─┐ -│ [3,2,1] │ -└─────────────────────────┘ -``` - -## inverse (arr) {#array-functions-reverse} - -Synonyme de [“arrayReverse”](#arrayreverse) - -## arrayFlatten {#arrayflatten} - -Convertit un tableau de tableaux dans un tableau associatif. - -Fonction: - -- S'applique à toute profondeur de tableaux imbriqués. -- Ne change pas les tableaux qui sont déjà plats. - -Le tableau aplati contient tous les éléments de tous les tableaux source. - -**Syntaxe** - -``` sql -flatten(array_of_arrays) -``` - -Alias: `flatten`. - -**Paramètre** - -- `array_of_arrays` — [Tableau](../../sql-reference/data-types/array.md) de tableaux. Exemple, `[[1,2,3], [4,5]]`. - -**Exemple** - -``` sql -SELECT flatten([[[1]], [[2], [3]]]) -``` - -``` text -┌─flatten(array(array([1]), array([2], [3])))─┐ -│ [1,2,3] │ -└─────────────────────────────────────────────┘ -``` - -## arrayCompact {#arraycompact} - -Supprime les éléments en double consécutifs d'un tableau. L'ordre des valeurs de résultat est déterminée par l'ordre dans le tableau source. - -**Syntaxe** - -``` sql -arrayCompact(arr) -``` - -**Paramètre** - -`arr` — The [tableau](../../sql-reference/data-types/array.md) inspecter. 
- -**Valeur renvoyée** - -Le tableau sans doublon. - -Type: `Array`. - -**Exemple** - -Requête: - -``` sql -SELECT arrayCompact([1, 1, nan, nan, 2, 3, 3, 3]) -``` - -Résultat: - -``` text -┌─arrayCompact([1, 1, nan, nan, 2, 3, 3, 3])─┐ -│ [1,nan,nan,2,3] │ -└────────────────────────────────────────────┘ -``` - -## arrayZip {#arrayzip} - -Combine plusieurs tableaux en un seul tableau. Le tableau résultant contient les éléments correspondants des tableaux source regroupés en tuples dans l'ordre des arguments listés. - -**Syntaxe** - -``` sql -arrayZip(arr1, arr2, ..., arrN) -``` - -**Paramètre** - -- `arrN` — [Tableau](../data-types/array.md). - -La fonction peut prendre n'importe quel nombre de tableaux de différents types. Tous les tableaux doivent être de taille égale. - -**Valeur renvoyée** - -- Tableau avec des éléments des tableaux source regroupés en [tuple](../data-types/tuple.md). Types de données dans le tuple sont les mêmes que les types de l'entrée des tableaux et dans le même ordre que les tableaux sont passés. - -Type: [Tableau](../data-types/array.md). - -**Exemple** - -Requête: - -``` sql -SELECT arrayZip(['a', 'b', 'c'], [5, 2, 1]) -``` - -Résultat: - -``` text -┌─arrayZip(['a', 'b', 'c'], [5, 2, 1])─┐ -│ [('a',5),('b',2),('c',1)] │ -└──────────────────────────────────────┘ -``` - -## arrayAUC {#arrayauc} - -Calculer AUC (zone sous la courbe, qui est un concept dans l'apprentissage automatique, voir plus de détails: https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve). - -**Syntaxe** - -``` sql -arrayAUC(arr_scores, arr_labels) -``` - -**Paramètre** -- `arr_scores` — scores prediction model gives. -- `arr_labels` — labels of samples, usually 1 for positive sample and 0 for negtive sample. - -**Valeur renvoyée** -Renvoie la valeur AUC avec le type Float64. - -**Exemple** -Requête: - -``` sql -select arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]) -``` - -Résultat: - -``` text -┌─arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1])─┐ -│ 0.75 │ -└────────────────────────────────────────---──┘ -``` - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/array_functions/) diff --git a/docs/fr/sql-reference/functions/array-join.md b/docs/fr/sql-reference/functions/array-join.md deleted file mode 100644 index 859e801994d..00000000000 --- a/docs/fr/sql-reference/functions/array-join.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 61 -toc_title: arrayJoin ---- - -# fonction arrayJoin {#functions_arrayjoin} - -C'est un très inhabituelle de la fonction. - -Les fonctions normales ne modifient pas un ensemble de lignes, mais modifient simplement les valeurs de chaque ligne (map). -Les fonctions d'agrégation compriment un ensemble de lignes (plier ou réduire). -Le ‘arrayJoin’ la fonction prend chaque ligne et génère un ensemble de lignes (dépliante). - -Cette fonction prend un tableau comme argument et propage la ligne source à plusieurs lignes pour le nombre d'éléments dans le tableau. -Toutes les valeurs des colonnes sont simplement copiés, sauf les valeurs dans la colonne où cette fonction est appliquée; elle est remplacée par la valeur correspondante de tableau. - -Une requête peut utiliser plusieurs `arrayJoin` fonction. Dans ce cas, la transformation est effectuée plusieurs fois. - -Notez la syntaxe de jointure de tableau dans la requête SELECT, qui offre des possibilités plus larges. 
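For comparison, a minimal sketch of that ARRAY JOIN clause form, which produces the same kind of row expansion as the function shown below:

``` sql
SELECT dst, src
FROM (SELECT [1, 2, 3] AS src)
ARRAY JOIN src AS dst
```

``` text
┌─dst─┬─src─────┐
│   1 │ [1,2,3] │
│   2 │ [1,2,3] │
│   3 │ [1,2,3] │
└─────┴─────────┘
```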
- -Exemple: - -``` sql -SELECT arrayJoin([1, 2, 3] AS src) AS dst, 'Hello', src -``` - -``` text -┌─dst─┬─\'Hello\'─┬─src─────┐ -│ 1 │ Hello │ [1,2,3] │ -│ 2 │ Hello │ [1,2,3] │ -│ 3 │ Hello │ [1,2,3] │ -└─────┴───────────┴─────────┘ -``` - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/array_join/) diff --git a/docs/fr/sql-reference/functions/bit-functions.md b/docs/fr/sql-reference/functions/bit-functions.md deleted file mode 100644 index 7b8795815f2..00000000000 --- a/docs/fr/sql-reference/functions/bit-functions.md +++ /dev/null @@ -1,255 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 48 -toc_title: Bit ---- - -# Peu De Fonctions {#bit-functions} - -Les fonctions Bit fonctionnent pour n'importe quelle paire de types de UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32 ou Float64. - -Le type de résultat est un entier avec des bits égaux aux bits maximum de ses arguments. Si au moins l'un des arguments est signé, le résultat est un signé nombre. Si un argument est un nombre à virgule flottante, Il est converti en Int64. - -## bitAnd (a, b) {#bitanda-b} - -## bitOr (a, b) {#bitora-b} - -## bitXor (a, b) {#bitxora-b} - -## bitNot (a) {#bitnota} - -## bitShiftLeft (A, b) {#bitshiftlefta-b} - -## bitShiftRight (A, b) {#bitshiftrighta-b} - -## bitRotateLeft (a, b) {#bitrotatelefta-b} - -## bitRotateRight (a, b) {#bitrotaterighta-b} - -## bitTest {#bittest} - -Prend tout entier et le convertit en [forme binaire](https://en.wikipedia.org/wiki/Binary_number) renvoie la valeur d'un bit à la position spécifiée. Le compte à rebours commence à partir de 0 de la droite vers la gauche. - -**Syntaxe** - -``` sql -SELECT bitTest(number, index) -``` - -**Paramètre** - -- `number` – integer number. -- `index` – position of bit. - -**Valeurs renvoyées** - -Renvoie une valeur de bit à la position spécifiée. - -Type: `UInt8`. - -**Exemple** - -Par exemple, le nombre 43 dans le système numérique de base-2 (binaire) est 101011. - -Requête: - -``` sql -SELECT bitTest(43, 1) -``` - -Résultat: - -``` text -┌─bitTest(43, 1)─┐ -│ 1 │ -└────────────────┘ -``` - -Un autre exemple: - -Requête: - -``` sql -SELECT bitTest(43, 2) -``` - -Résultat: - -``` text -┌─bitTest(43, 2)─┐ -│ 0 │ -└────────────────┘ -``` - -## bitTestAll {#bittestall} - -Renvoie le résultat de [logique de conjonction](https://en.wikipedia.org/wiki/Logical_conjunction) (Et opérateur) de tous les bits à des positions données. Le compte à rebours commence à partir de 0 de la droite vers la gauche. - -La conjonction pour les opérations bit à bit: - -0 AND 0 = 0 - -0 AND 1 = 0 - -1 AND 0 = 0 - -1 AND 1 = 1 - -**Syntaxe** - -``` sql -SELECT bitTestAll(number, index1, index2, index3, index4, ...) -``` - -**Paramètre** - -- `number` – integer number. -- `index1`, `index2`, `index3`, `index4` – positions of bit. For example, for set of positions (`index1`, `index2`, `index3`, `index4`) est vrai si et seulement si toutes ses positions sont remplies (`index1` ⋀ `index2`, ⋀ `index3` ⋀ `index4`). - -**Valeurs renvoyées** - -Retourne le résultat de la conjonction logique. - -Type: `UInt8`. - -**Exemple** - -Par exemple, le nombre 43 dans le système numérique de base-2 (binaire) est 101011. 
- -Requête: - -``` sql -SELECT bitTestAll(43, 0, 1, 3, 5) -``` - -Résultat: - -``` text -┌─bitTestAll(43, 0, 1, 3, 5)─┐ -│ 1 │ -└────────────────────────────┘ -``` - -Un autre exemple: - -Requête: - -``` sql -SELECT bitTestAll(43, 0, 1, 3, 5, 2) -``` - -Résultat: - -``` text -┌─bitTestAll(43, 0, 1, 3, 5, 2)─┐ -│ 0 │ -└───────────────────────────────┘ -``` - -## bitTestAny {#bittestany} - -Renvoie le résultat de [disjonction logique](https://en.wikipedia.org/wiki/Logical_disjunction) (Ou opérateur) de tous les bits à des positions données. Le compte à rebours commence à partir de 0 de la droite vers la gauche. - -La disjonction pour les opérations binaires: - -0 OR 0 = 0 - -0 OR 1 = 1 - -1 OR 0 = 1 - -1 OR 1 = 1 - -**Syntaxe** - -``` sql -SELECT bitTestAny(number, index1, index2, index3, index4, ...) -``` - -**Paramètre** - -- `number` – integer number. -- `index1`, `index2`, `index3`, `index4` – positions of bit. - -**Valeurs renvoyées** - -Renvoie le résultat de la disjuction logique. - -Type: `UInt8`. - -**Exemple** - -Par exemple, le nombre 43 dans le système numérique de base-2 (binaire) est 101011. - -Requête: - -``` sql -SELECT bitTestAny(43, 0, 2) -``` - -Résultat: - -``` text -┌─bitTestAny(43, 0, 2)─┐ -│ 1 │ -└──────────────────────┘ -``` - -Un autre exemple: - -Requête: - -``` sql -SELECT bitTestAny(43, 4, 2) -``` - -Résultat: - -``` text -┌─bitTestAny(43, 4, 2)─┐ -│ 0 │ -└──────────────────────┘ -``` - -## bitCount {#bitcount} - -Calcule le nombre de bits mis à un dans la représentation binaire d'un nombre. - -**Syntaxe** - -``` sql -bitCount(x) -``` - -**Paramètre** - -- `x` — [Entier](../../sql-reference/data-types/int-uint.md) ou [virgule flottante](../../sql-reference/data-types/float.md) nombre. La fonction utilise la représentation de la valeur en mémoire. Il permet de financer les nombres à virgule flottante. - -**Valeur renvoyée** - -- Nombre de bits défini sur un dans le numéro d'entrée. - -La fonction ne convertit pas la valeur d'entrée en un type plus grand ([l'extension du signe](https://en.wikipedia.org/wiki/Sign_extension)). Ainsi, par exemple, `bitCount(toUInt8(-1)) = 8`. - -Type: `UInt8`. - -**Exemple** - -Prenez par exemple le numéro 333. Sa représentation binaire: 0000000101001101. - -Requête: - -``` sql -SELECT bitCount(333) -``` - -Résultat: - -``` text -┌─bitCount(333)─┐ -│ 5 │ -└───────────────┘ -``` - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/bit_functions/) diff --git a/docs/fr/sql-reference/functions/bitmap-functions.md b/docs/fr/sql-reference/functions/bitmap-functions.md deleted file mode 100644 index 15cb68ffc52..00000000000 --- a/docs/fr/sql-reference/functions/bitmap-functions.md +++ /dev/null @@ -1,496 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 49 -toc_title: Bitmap ---- - -# Fonctions De Bitmap {#bitmap-functions} - -Les fonctions Bitmap fonctionnent pour le calcul de la valeur de L'objet de deux bitmaps, il s'agit de renvoyer un nouveau bitmap ou une cardinalité tout en utilisant le calcul de la formule, tel que and, or, xor, and not, etc. - -Il existe 2 types de méthodes de construction pour L'objet Bitmap. L'un doit être construit par la fonction d'agrégation groupBitmap avec-State, l'autre doit être construit par L'objet Array. Il est également de convertir L'objet Bitmap en objet tableau. - -RoaringBitmap est enveloppé dans une structure de données pendant le stockage réel des objets Bitmap. 
Lorsque la cardinalité est inférieure ou égale à 32, elle utilise Set objet. Lorsque la cardinalité est supérieure à 32, elle utilise l'objet RoaringBitmap. C'est pourquoi le stockage de faible cardinalité jeu est plus rapide. - -Pour plus d'informations sur RoaringBitmap, voir: [CRoaring](https://github.com/RoaringBitmap/CRoaring). - -## bitmapBuild {#bitmap_functions-bitmapbuild} - -Construire un bitmap à partir d'un tableau entier non signé. - -``` sql -bitmapBuild(array) -``` - -**Paramètre** - -- `array` – unsigned integer array. - -**Exemple** - -``` sql -SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res) -``` - -``` text -┌─res─┬─toTypeName(bitmapBuild([1, 2, 3, 4, 5]))─────┐ -│  │ AggregateFunction(groupBitmap, UInt8) │ -└─────┴──────────────────────────────────────────────┘ -``` - -## bitmapToArray {#bitmaptoarray} - -Convertir bitmap en tableau entier. - -``` sql -bitmapToArray(bitmap) -``` - -**Paramètre** - -- `bitmap` – bitmap object. - -**Exemple** - -``` sql -SELECT bitmapToArray(bitmapBuild([1, 2, 3, 4, 5])) AS res -``` - -``` text -┌─res─────────┐ -│ [1,2,3,4,5] │ -└─────────────┘ -``` - -## bitmapSubsetInRange {#bitmap-functions-bitmapsubsetinrange} - -Retourne le sous-ensemble dans la plage spécifiée (n'inclut pas le range_end). - -``` sql -bitmapSubsetInRange(bitmap, range_start, range_end) -``` - -**Paramètre** - -- `bitmap` – [Objet Bitmap](#bitmap_functions-bitmapbuild). -- `range_start` – range start point. Type: [UInt32](../../sql-reference/data-types/int-uint.md). -- `range_end` – range end point(excluded). Type: [UInt32](../../sql-reference/data-types/int-uint.md). - -**Exemple** - -``` sql -SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res -``` - -``` text -┌─res───────────────┐ -│ [30,31,32,33,100] │ -└───────────────────┘ -``` - -## bitmapSubsetLimit {#bitmapsubsetlimit} - -Crée un sous-ensemble de bitmap avec n éléments pris entre `range_start` et `cardinality_limit`. - -**Syntaxe** - -``` sql -bitmapSubsetLimit(bitmap, range_start, cardinality_limit) -``` - -**Paramètre** - -- `bitmap` – [Objet Bitmap](#bitmap_functions-bitmapbuild). -- `range_start` – The subset starting point. Type: [UInt32](../../sql-reference/data-types/int-uint.md). -- `cardinality_limit` – The subset cardinality upper limit. Type: [UInt32](../../sql-reference/data-types/int-uint.md). - -**Valeur renvoyée** - -Ensemble. - -Type: `Bitmap object`. - -**Exemple** - -Requête: - -``` sql -SELECT bitmapToArray(bitmapSubsetLimit(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res -``` - -Résultat: - -``` text -┌─res───────────────────────┐ -│ [30,31,32,33,100,200,500] │ -└───────────────────────────┘ -``` - -## bitmapContains {#bitmap_functions-bitmapcontains} - -Vérifie si le bitmap contient un élément. - -``` sql -bitmapContains(haystack, needle) -``` - -**Paramètre** - -- `haystack` – [Objet Bitmap](#bitmap_functions-bitmapbuild) où la fonction recherche. -- `needle` – Value that the function searches. Type: [UInt32](../../sql-reference/data-types/int-uint.md). - -**Valeurs renvoyées** - -- 0 — If `haystack` ne contient pas de `needle`. -- 1 — If `haystack` contenir `needle`. - -Type: `UInt8`. 
- -**Exemple** - -``` sql -SELECT bitmapContains(bitmapBuild([1,5,7,9]), toUInt32(9)) AS res -``` - -``` text -┌─res─┐ -│ 1 │ -└─────┘ -``` - -## bitmapHasAny {#bitmaphasany} - -Vérifie si deux bitmaps ont une intersection par certains éléments. - -``` sql -bitmapHasAny(bitmap1, bitmap2) -``` - -Si vous êtes sûr que `bitmap2` contient strictement un élément, envisagez d'utiliser le [bitmapContains](#bitmap_functions-bitmapcontains) fonction. Cela fonctionne plus efficacement. - -**Paramètre** - -- `bitmap*` – bitmap object. - -**Les valeurs de retour** - -- `1`, si `bitmap1` et `bitmap2` avoir un élément similaire au moins. -- `0`, autrement. - -**Exemple** - -``` sql -SELECT bitmapHasAny(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res -``` - -``` text -┌─res─┐ -│ 1 │ -└─────┘ -``` - -## bitmapHasAll {#bitmaphasall} - -Analogue à `hasAll(array, array)` renvoie 1 si le premier bitmap contient tous les éléments du second, 0 sinon. -Si le deuxième argument est un bitmap vide, alors renvoie 1. - -``` sql -bitmapHasAll(bitmap,bitmap) -``` - -**Paramètre** - -- `bitmap` – bitmap object. - -**Exemple** - -``` sql -SELECT bitmapHasAll(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res -``` - -``` text -┌─res─┐ -│ 0 │ -└─────┘ -``` - -## bitmapCardinality {#bitmapcardinality} - -Retrun bitmap cardinalité de type UInt64. - -``` sql -bitmapCardinality(bitmap) -``` - -**Paramètre** - -- `bitmap` – bitmap object. - -**Exemple** - -``` sql -SELECT bitmapCardinality(bitmapBuild([1, 2, 3, 4, 5])) AS res -``` - -``` text -┌─res─┐ -│ 5 │ -└─────┘ -``` - -## bitmapMin {#bitmapmin} - -Retrun la plus petite valeur de type UInt64 dans l'ensemble, UINT32_MAX si l'ensemble est vide. - - bitmapMin(bitmap) - -**Paramètre** - -- `bitmap` – bitmap object. - -**Exemple** - -``` sql -SELECT bitmapMin(bitmapBuild([1, 2, 3, 4, 5])) AS res -``` - - ┌─res─┐ - │ 1 │ - └─────┘ - -## bitmapMax {#bitmapmax} - -Retrun la plus grande valeur de type UInt64 dans l'ensemble, 0 si l'ensemble est vide. - - bitmapMax(bitmap) - -**Paramètre** - -- `bitmap` – bitmap object. - -**Exemple** - -``` sql -SELECT bitmapMax(bitmapBuild([1, 2, 3, 4, 5])) AS res -``` - - ┌─res─┐ - │ 5 │ - └─────┘ - -## bitmapTransform {#bitmaptransform} - -Transformer un tableau de valeurs d'une image à l'autre tableau de valeurs, le résultat est une nouvelle image. - - bitmapTransform(bitmap, from_array, to_array) - -**Paramètre** - -- `bitmap` – bitmap object. -- `from_array` – UInt32 array. For idx in range \[0, from_array.size()), if bitmap contains from_array\[idx\], then replace it with to_array\[idx\]. Note that the result depends on array ordering if there are common elements between from_array and to_array. -- `to_array` – UInt32 array, its size shall be the same to from_array. - -**Exemple** - -``` sql -SELECT bitmapToArray(bitmapTransform(bitmapBuild([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), cast([5,999,2] as Array(UInt32)), cast([2,888,20] as Array(UInt32)))) AS res -``` - - ┌─res───────────────────┐ - │ [1,3,4,6,7,8,9,10,20] │ - └───────────────────────┘ - -## bitmapAnd {#bitmapand} - -Deux bitmap et calcul, le résultat est un nouveau bitmap. - -``` sql -bitmapAnd(bitmap,bitmap) -``` - -**Paramètre** - -- `bitmap` – bitmap object. - -**Exemple** - -``` sql -SELECT bitmapToArray(bitmapAnd(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res -``` - -``` text -┌─res─┐ -│ [3] │ -└─────┘ -``` - -## bitmapOr {#bitmapor} - -Deux bitmap ou calcul, le résultat est un nouveau bitmap. 
- -``` sql -bitmapOr(bitmap,bitmap) -``` - -**Paramètre** - -- `bitmap` – bitmap object. - -**Exemple** - -``` sql -SELECT bitmapToArray(bitmapOr(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res -``` - -``` text -┌─res─────────┐ -│ [1,2,3,4,5] │ -└─────────────┘ -``` - -## bitmapXor {#bitmapxor} - -Deux bitmap xor calcul, le résultat est une nouvelle image. - -``` sql -bitmapXor(bitmap,bitmap) -``` - -**Paramètre** - -- `bitmap` – bitmap object. - -**Exemple** - -``` sql -SELECT bitmapToArray(bitmapXor(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res -``` - -``` text -┌─res───────┐ -│ [1,2,4,5] │ -└───────────┘ -``` - -## bitmapetnot {#bitmapandnot} - -Deux Bitmap andnot calcul, le résultat est un nouveau bitmap. - -``` sql -bitmapAndnot(bitmap,bitmap) -``` - -**Paramètre** - -- `bitmap` – bitmap object. - -**Exemple** - -``` sql -SELECT bitmapToArray(bitmapAndnot(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res -``` - -``` text -┌─res───┐ -│ [1,2] │ -└───────┘ -``` - -## bitmapetcardinalité {#bitmapandcardinality} - -Deux bitmap et calcul, retour cardinalité de type UInt64. - -``` sql -bitmapAndCardinality(bitmap,bitmap) -``` - -**Paramètre** - -- `bitmap` – bitmap object. - -**Exemple** - -``` sql -SELECT bitmapAndCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; -``` - -``` text -┌─res─┐ -│ 1 │ -└─────┘ -``` - -## bitmapOrCardinality {#bitmaporcardinality} - -Deux bitmap ou calcul, retour cardinalité de type UInt64. - -``` sql -bitmapOrCardinality(bitmap,bitmap) -``` - -**Paramètre** - -- `bitmap` – bitmap object. - -**Exemple** - -``` sql -SELECT bitmapOrCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; -``` - -``` text -┌─res─┐ -│ 5 │ -└─────┘ -``` - -## bitmapXorCardinality {#bitmapxorcardinality} - -Deux bitmap XOR calcul, retour cardinalité de type UInt64. - -``` sql -bitmapXorCardinality(bitmap,bitmap) -``` - -**Paramètre** - -- `bitmap` – bitmap object. - -**Exemple** - -``` sql -SELECT bitmapXorCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; -``` - -``` text -┌─res─┐ -│ 4 │ -└─────┘ -``` - -## bitmapetnotcardinality {#bitmapandnotcardinality} - -Deux bitmap andnot calcul, retour cardinalité de type UInt64. - -``` sql -bitmapAndnotCardinality(bitmap,bitmap) -``` - -**Paramètre** - -- `bitmap` – bitmap object. - -**Exemple** - -``` sql -SELECT bitmapAndnotCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; -``` - -``` text -┌─res─┐ -│ 2 │ -└─────┘ -``` - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/bitmap_functions/) diff --git a/docs/fr/sql-reference/functions/comparison-functions.md b/docs/fr/sql-reference/functions/comparison-functions.md deleted file mode 100644 index a5008c676fa..00000000000 --- a/docs/fr/sql-reference/functions/comparison-functions.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 36 -toc_title: Comparaison ---- - -# Fonctions De Comparaison {#comparison-functions} - -Les fonctions de comparaison renvoient toujours 0 ou 1 (Uint8). - -Les types suivants peuvent être comparés: - -- nombre -- cordes et cordes fixes -- date -- dates avec heures - -au sein de chaque groupe, mais pas entre différents groupes. - -Par exemple, vous ne pouvez pas comparer une date avec une chaîne. Vous devez utiliser une fonction pour convertir la chaîne en une date, ou vice versa. - -Les chaînes sont comparées par octets. 
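A quick illustrative query (comparison is by byte values, hence case-sensitive):

``` sql
SELECT 'abc' < 'abd', 'Z' < 'a'
```

``` text
┌─less('abc', 'abd')─┬─less('Z', 'a')─┐
│                  1 │              1 │
└────────────────────┴────────────────┘
```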
Une courte chaîne est plus petite que toutes les chaînes qui commencent par elle et qui contiennent au moins un caractère de plus. - -## égal, A = B et a = = b opérateur {#function-equals} - -## notEquals, a ! opérateur= b et a \<\> b {#function-notequals} - -## moins, opérateur \< {#function-less} - -## de plus, \> opérateur {#function-greater} - -## lessOrEquals, \< = opérateur {#function-lessorequals} - -## greaterOrEquals, \> = opérateur {#function-greaterorequals} - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/comparison_functions/) diff --git a/docs/fr/sql-reference/functions/conditional-functions.md b/docs/fr/sql-reference/functions/conditional-functions.md deleted file mode 100644 index 3912b49aa6a..00000000000 --- a/docs/fr/sql-reference/functions/conditional-functions.md +++ /dev/null @@ -1,207 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 43 -toc_title: 'Conditionnel ' ---- - -# Fonctions Conditionnelles {#conditional-functions} - -## si {#if} - -Contrôle la ramification conditionnelle. Contrairement à la plupart des systèmes, ClickHouse évalue toujours les deux expressions `then` et `else`. - -**Syntaxe** - -``` sql -SELECT if(cond, then, else) -``` - -Si la condition `cond` renvoie une valeur non nulle, retourne le résultat de l'expression `then` et le résultat de l'expression `else`, si présent, est ignoré. Si l' `cond` est égal à zéro ou `NULL` alors le résultat de la `then` l'expression est ignorée et le résultat de `else` expression, si elle est présente, est renvoyée. - -**Paramètre** - -- `cond` – The condition for evaluation that can be zero or not. The type is UInt8, Nullable(UInt8) or NULL. -- `then` - L'expression à renvoyer si la condition est remplie. -- `else` - L'expression à renvoyer si la condition n'est pas remplie. - -**Valeurs renvoyées** - -La fonction s'exécute `then` et `else` expressions et retourne son résultat, selon que la condition `cond` fini par être zéro ou pas. - -**Exemple** - -Requête: - -``` sql -SELECT if(1, plus(2, 2), plus(2, 6)) -``` - -Résultat: - -``` text -┌─plus(2, 2)─┐ -│ 4 │ -└────────────┘ -``` - -Requête: - -``` sql -SELECT if(0, plus(2, 2), plus(2, 6)) -``` - -Résultat: - -``` text -┌─plus(2, 6)─┐ -│ 8 │ -└────────────┘ -``` - -- `then` et `else` doit avoir le type commun le plus bas. - -**Exemple:** - -Prendre cette `LEFT_RIGHT` table: - -``` sql -SELECT * -FROM LEFT_RIGHT - -┌─left─┬─right─┐ -│ ᴺᵁᴸᴸ │ 4 │ -│ 1 │ 3 │ -│ 2 │ 2 │ -│ 3 │ 1 │ -│ 4 │ ᴺᵁᴸᴸ │ -└──────┴───────┘ -``` - -La requête suivante compare `left` et `right` valeur: - -``` sql -SELECT - left, - right, - if(left < right, 'left is smaller than right', 'right is greater or equal than left') AS is_smaller -FROM LEFT_RIGHT -WHERE isNotNull(left) AND isNotNull(right) - -┌─left─┬─right─┬─is_smaller──────────────────────────┐ -│ 1 │ 3 │ left is smaller than right │ -│ 2 │ 2 │ right is greater or equal than left │ -│ 3 │ 1 │ right is greater or equal than left │ -└──────┴───────┴─────────────────────────────────────┘ -``` - -Note: `NULL` les valeurs ne sont pas utilisés dans cet exemple, vérifier [Valeurs nulles dans les conditions](#null-values-in-conditionals) section. - -## Opérateur Ternaire {#ternary-operator} - -Il fonctionne même comme `if` fonction. - -Syntaxe: `cond ? then : else` - -Retourner `then` si l' `cond` renvoie la valeur vrai (supérieur à zéro), sinon renvoie `else`. 
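A small sketch of the operator in use:

``` sql
SELECT number, number < 2 ? 'low' : 'high' AS bucket FROM numbers(4)
```

``` text
┌─number─┬─bucket─┐
│      0 │ low    │
│      1 │ low    │
│      2 │ high   │
│      3 │ high   │
└────────┴────────┘
```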
- -- `cond` doit être de type de `UInt8`, et `then` et `else` doit avoir le type commun le plus bas. - -- `then` et `else` peut être `NULL` - -**Voir aussi** - -- [ifNotFinite](other-functions.md#ifnotfinite). - -## multiIf {#multiif} - -Permet d'écrire le [CASE](../operators/index.md#operator_case) opérateur plus compacte dans la requête. - -Syntaxe: `multiIf(cond_1, then_1, cond_2, then_2, ..., else)` - -**Paramètre:** - -- `cond_N` — The condition for the function to return `then_N`. -- `then_N` — The result of the function when executed. -- `else` — The result of the function if none of the conditions is met. - -La fonction accepte `2N+1` paramètre. - -**Valeurs renvoyées** - -La fonction renvoie l'une des valeurs `then_N` ou `else` selon les conditions `cond_N`. - -**Exemple** - -En utilisant à nouveau `LEFT_RIGHT` table. - -``` sql -SELECT - left, - right, - multiIf(left < right, 'left is smaller', left > right, 'left is greater', left = right, 'Both equal', 'Null value') AS result -FROM LEFT_RIGHT - -┌─left─┬─right─┬─result──────────┐ -│ ᴺᵁᴸᴸ │ 4 │ Null value │ -│ 1 │ 3 │ left is smaller │ -│ 2 │ 2 │ Both equal │ -│ 3 │ 1 │ left is greater │ -│ 4 │ ᴺᵁᴸᴸ │ Null value │ -└──────┴───────┴─────────────────┘ -``` - -## Utilisation Directe Des Résultats Conditionnels {#using-conditional-results-directly} - -Les conditions entraînent toujours `0`, `1` ou `NULL`. Vous pouvez donc utiliser des résultats conditionnels directement comme ceci: - -``` sql -SELECT left < right AS is_small -FROM LEFT_RIGHT - -┌─is_small─┐ -│ ᴺᵁᴸᴸ │ -│ 1 │ -│ 0 │ -│ 0 │ -│ ᴺᵁᴸᴸ │ -└──────────┘ -``` - -## Valeurs nulles dans les conditions {#null-values-in-conditionals} - -Lorsque `NULL` les valeurs sont impliqués dans des conditions, le résultat sera également `NULL`. - -``` sql -SELECT - NULL < 1, - 2 < NULL, - NULL < NULL, - NULL = NULL - -┌─less(NULL, 1)─┬─less(2, NULL)─┬─less(NULL, NULL)─┬─equals(NULL, NULL)─┐ -│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ -└───────────────┴───────────────┴──────────────────┴────────────────────┘ -``` - -Donc, vous devriez construire vos requêtes avec soin si les types sont `Nullable`. - -L'exemple suivant le démontre en omettant d'ajouter la condition égale à `multiIf`. - -``` sql -SELECT - left, - right, - multiIf(left < right, 'left is smaller', left > right, 'right is smaller', 'Both equal') AS faulty_result -FROM LEFT_RIGHT - -┌─left─┬─right─┬─faulty_result────┐ -│ ᴺᵁᴸᴸ │ 4 │ Both equal │ -│ 1 │ 3 │ left is smaller │ -│ 2 │ 2 │ Both equal │ -│ 3 │ 1 │ right is smaller │ -│ 4 │ ᴺᵁᴸᴸ │ Both equal │ -└──────┴───────┴──────────────────┘ -``` - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/conditional_functions/) diff --git a/docs/fr/sql-reference/functions/date-time-functions.md b/docs/fr/sql-reference/functions/date-time-functions.md deleted file mode 100644 index d1c16b42d07..00000000000 --- a/docs/fr/sql-reference/functions/date-time-functions.md +++ /dev/null @@ -1,450 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 39 -toc_title: Travailler avec les Dates et les heures ---- - -# Fonctions pour travailler avec des Dates et des heures {#functions-for-working-with-dates-and-times} - -Support des fuseaux horaires - -Toutes les fonctions pour travailler avec la date et l'heure qui ont une logique d'utilisation pour le fuseau horaire peut accepter un second fuseau horaire argument. Exemple: Asie / Ekaterinbourg. 
Dans ce cas, elles utilisent le fuseau horaire spécifié au lieu du fuseau horaire local (par défaut). - -``` sql -SELECT - toDateTime('2016-06-15 23:00:00') AS time, - toDate(time) AS date_local, - toDate(time, 'Asia/Yekaterinburg') AS date_yekat, - toString(time, 'US/Samoa') AS time_samoa -``` - -``` text -┌────────────────time─┬─date_local─┬─date_yekat─┬─time_samoa──────────┐ -│ 2016-06-15 23:00:00 │ 2016-06-15 │ 2016-06-16 │ 2016-06-15 09:00:00 │ -└─────────────────────┴────────────┴────────────┴─────────────────────┘ -``` - -Seuls les fuseaux horaires qui diffèrent de l'UTC par un nombre entier d'heures sont pris en charge. - -## toTimeZone {#totimezone} - -Convertit une heure ou une date avec l'heure au fuseau horaire spécifié. - -## toYear {#toyear} - -Convertit une date ou une date avec l'heure en un numéro UInt16 contenant le numéro d'année (AD). - -## toQuarter {#toquarter} - -Convertit une date ou une date avec l'heure en un numéro UInt8 contenant le numéro de trimestre. - -## toMonth {#tomonth} - -Convertit une date ou une date avec l'heure en un numéro UInt8 contenant le numéro de mois (1-12). - -## toDayOfYear {#todayofyear} - -Convertit une date ou une date avec l'heure en un numéro UInt16 contenant le numéro du jour de l'année (1-366). - -## toDayOfMonth {#todayofmonth} - -Convertit une date ou une date avec l'heure en un numéro UInt8 contenant le numéro du jour du mois (1-31). - -## toDayOfWeek {#todayofweek} - -Convertit une date ou une date avec l'heure en un numéro UInt8 contenant le numéro du jour de la semaine (lundi est 1, et dimanche est 7). - -## toHour {#tohour} - -Convertit une date avec l'heure en un numéro UInt8 contenant le numéro de l'heure au format 24 heures (0-23). -Cette fonction suppose que si les horloges sont avancées, c'est d'une heure et que cela se produit à 2 heures du matin, et que si elles sont reculées, c'est d'une heure et que cela se produit à 3 heures du matin (ce qui n'est pas toujours vrai – même à Moscou, les horloges ont été changées deux fois à un autre moment). - -## toMinute {#tominute} - -Convertit une date avec l'heure en un numéro UInt8 contenant le numéro de la minute de l'heure (0-59). - -## toSecond {#tosecond} - -Convertit une date avec l'heure en un numéro UInt8 contenant le numéro de la seconde dans la minute (0-59). -Les secondes intercalaires ne sont pas comptabilisées. - -## toUnixTimestamp {#to-unix-timestamp} - -Pour l'argument DateTime : convertit la valeur en sa représentation numérique interne (horodatage Unix). -Pour l'argument String : analyse la date et l'heure à partir de la chaîne en fonction du fuseau horaire (second argument optionnel ; le fuseau horaire du serveur est utilisé par défaut) et renvoie l'horodatage Unix correspondant. -Pour l'argument Date : le comportement n'est pas spécifié. - -**Syntaxe** - -``` sql -toUnixTimestamp(datetime) -toUnixTimestamp(str, [timezone]) -``` - -**Valeur renvoyée** - -- Renvoie l'horodatage Unix. - -Type: `UInt32`. - -**Exemple** - -Requête: - -``` sql -SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp -``` - -Résultat: - -``` text -┌─unix_timestamp─┐ -│ 1509836867 │ -└────────────────┘ -``` - -## toStartOfYear {#tostartofyear} - -Arrondit une date ou une date avec l'heure jusqu'au premier jour de l'année. -Renvoie la date. - -## toStartOfISOYear {#tostartofisoyear} - -Arrondit une date ou une date avec l'heure jusqu'au premier jour de l'année ISO. -Renvoie la date. - -## toStartOfQuarter {#tostartofquarter} - -Arrondit une date ou une date avec l'heure jusqu'au premier jour du trimestre.
-Le premier jour du trimestre est le 1er janvier, le 1er avril, le 1er juillet ou le 1er octobre. -Renvoie la date. - -## toStartOfMonth {#tostartofmonth} - -Arrondit une date ou une date avec l'heure jusqu'au premier jour du mois. -Renvoie la date. - -!!! attention "Attention" - Le comportement de l'analyse des dates incorrectes est spécifique à l'implémentation. ClickHouse peut renvoyer la date zéro, lever une exception ou effectuer un débordement « naturel ». - -## toMonday {#tomonday} - -Arrondit une date ou une date avec l'heure au lundi le plus proche. -Renvoie la date. - -## toStartOfWeek (t \[, mode\]) {#tostartofweektmode} - -Arrondit une date ou une date avec l'heure au dimanche ou au lundi le plus proche, selon le mode. -Renvoie la date. -L'argument mode fonctionne exactement comme l'argument mode de toWeek(). Pour la syntaxe à argument unique, une valeur de mode de 0 est utilisée. - -## toStartOfDay {#tostartofday} - -Arrondit une date avec l'heure au début de la journée. - -## toStartOfHour {#tostartofhour} - -Arrondit une date avec l'heure au début de l'heure. - -## toStartOfMinute {#tostartofminute} - -Arrondit une date avec l'heure au début de la minute. - -## toStartOfFiveMinute {#tostartoffiveminute} - -Arrondit une date avec l'heure au début de l'intervalle de cinq minutes. - -## toStartOfTenMinutes {#tostartoftenminutes} - -Arrondit une date avec l'heure au début de l'intervalle de dix minutes. - -## toStartOfFifteenMinutes {#tostartoffifteenminutes} - -Arrondit une date avec l'heure au début de l'intervalle de quinze minutes. - -## toStartOfInterval(time_or_date, INTERVAL x unit \[, time_zone\]) {#tostartofintervaltime-or-data-interval-x-unit-time-zone} - -Ceci est une généralisation des autres fonctions nommées `toStartOf*`. Par exemple, -`toStartOfInterval(t, INTERVAL 1 year)` renvoie la même chose que `toStartOfYear(t)`, -`toStartOfInterval(t, INTERVAL 1 month)` renvoie la même chose que `toStartOfMonth(t)`, -`toStartOfInterval(t, INTERVAL 1 day)` renvoie la même chose que `toStartOfDay(t)`, -`toStartOfInterval(t, INTERVAL 15 minute)` renvoie la même chose que `toStartOfFifteenMinutes(t)`, etc. - -## toTime {#totime} - -Convertit une date avec l'heure en une certaine date fixe, tout en préservant l'heure. - -## toRelativeYearNum {#torelativeyearnum} - -Convertit une date avec l'heure ou une date au numéro de l'année, à partir d'un certain point fixe dans le passé. - -## toRelativeQuarterNum {#torelativequarternum} - -Convertit une date avec l'heure ou une date au numéro du trimestre, à partir d'un certain point fixe dans le passé. - -## toRelativeMonthNum {#torelativemonthnum} - -Convertit une date avec l'heure ou une date au numéro du mois, à partir d'un certain point fixe dans le passé. - -## toRelativeWeekNum {#torelativeweeknum} - -Convertit une date avec l'heure ou une date au numéro de la semaine, à partir d'un certain point fixe dans le passé. - -## toRelativeDayNum {#torelativedaynum} - -Convertit une date avec l'heure ou une date au numéro du jour, à partir d'un certain point fixe dans le passé. - -## toRelativeHourNum {#torelativehournum} - -Convertit une date avec l'heure ou une date au numéro de l'heure, à partir d'un certain point fixe dans le passé. - -## toRelativeMinuteNum {#torelativeminutenum} - -Convertit une date avec l'heure ou une date au numéro de la minute, à partir d'un certain point fixe dans le passé.
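Les fonctions `toRelative*Num` (la série se poursuit ci-dessous) servent surtout à calculer des différences en unités entières entre deux horodatages. Esquisse minimale, en supposant que les deux valeurs sont interprétées dans le même fuseau horaire :

``` sql
SELECT
    toRelativeHourNum(toDateTime('2018-01-02 23:00:00'))
  - toRelativeHourNum(toDateTime('2018-01-01 22:00:00')) AS hours_between
-- hours_between vaut 25 : la différence en heures entières entre les deux horodatages
```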
- -## toRelativeSecondNum {#torelativesecondnum} - -Convertit une date avec l'heure ou la date au numéro de la seconde, à partir d'un certain point fixe dans le passé. - -## toISOYear {#toisoyear} - -Convertit une date ou une date avec l'heure en un numéro UInt16 contenant le numéro D'année ISO. - -## toISOWeek {#toisoweek} - -Convertit une date ou une date avec l'heure en un numéro UInt8 contenant le numéro de semaine ISO. - -## toWeek (date \[, mode\]) {#toweekdatemode} - -Cette fonction renvoie le numéro de semaine pour date ou datetime. La forme à deux arguments de toWeek() vous permet de spécifier si la semaine commence le dimanche ou le lundi et si la valeur de retour doit être comprise entre 0 et 53 ou entre 1 et 53. Si l'argument mode est omis, le mode par défaut est 0. -`toISOWeek()`est une fonction de compatibilité équivalente à `toWeek(date,3)`. -Le tableau suivant décrit le fonctionnement de l'argument mode. - -| Mode | Premier jour de la semaine | Gamme | Week 1 is the first week … | -|------|----------------------------|-------|----------------------------------| -| 0 | Dimanche | 0-53 | avec un dimanche cette année | -| 1 | Lundi | 0-53 | avec 4 jours ou plus cette année | -| 2 | Dimanche | 1-53 | avec un dimanche cette année | -| 3 | Lundi | 1-53 | avec 4 jours ou plus cette année | -| 4 | Dimanche | 0-53 | avec 4 jours ou plus cette année | -| 5 | Lundi | 0-53 | avec un lundi cette année | -| 6 | Dimanche | 1-53 | avec 4 jours ou plus cette année | -| 7 | Lundi | 1-53 | avec un lundi cette année | -| 8 | Dimanche | 1-53 | contient Janvier 1 | -| 9 | Lundi | 1-53 | contient Janvier 1 | - -Pour les valeurs de mode avec une signification de “with 4 or more days this year,” les semaines sont numérotées selon ISO 8601: 1988: - -- Si la semaine contenant Janvier 1 A 4 jours ou plus dans la nouvelle année, il est Semaine 1. - -- Sinon, c'est la dernière semaine de l'année précédente, et la semaine prochaine est la semaine 1. - -Pour les valeurs de mode avec une signification de “contains January 1”, la semaine contient Janvier 1 est Semaine 1. Peu importe combien de jours dans la nouvelle année la semaine contenait, même si elle contenait seulement un jour. - -``` sql -toWeek(date, [, mode][, Timezone]) -``` - -**Paramètre** - -- `date` – Date or DateTime. -- `mode` – Optional parameter, Range of values is \[0,9\], default is 0. -- `Timezone` – Optional parameter, it behaves like any other conversion function. - -**Exemple** - -``` sql -SELECT toDate('2016-12-27') AS date, toWeek(date) AS week0, toWeek(date,1) AS week1, toWeek(date,9) AS week9; -``` - -``` text -┌───────date─┬─week0─┬─week1─┬─week9─┐ -│ 2016-12-27 │ 52 │ 52 │ 1 │ -└────────────┴───────┴───────┴───────┘ -``` - -## toYearWeek (date \[, mode\]) {#toyearweekdatemode} - -Retourne l'année et la semaine pour une date. L'année dans le résultat peut être différente de l'année dans l'argument date pour la première et la dernière semaine de l'année. - -L'argument mode fonctionne exactement comme l'argument mode de toWeek(). Pour la syntaxe à argument unique, une valeur de mode de 0 est utilisée. - -`toISOYear()`est une fonction de compatibilité équivalente à `intDiv(toYearWeek(date,3),100)`. 
- -**Exemple** - -``` sql -SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(date,1) AS yearWeek1, toYearWeek(date,9) AS yearWeek9; -``` - -``` text -┌───────date─┬─yearWeek0─┬─yearWeek1─┬─yearWeek9─┐ -│ 2016-12-27 │ 201652 │ 201652 │ 201701 │ -└────────────┴───────────┴───────────┴───────────┘ -``` - -## now {#now} - -Accepte zéro argument et renvoie l'heure actuelle à l'un des moments de l'exécution de la requête. -Cette fonction renvoie une constante, même si la requête a mis longtemps à s'exécuter. - -## today {#today} - -Accepte zéro argument et renvoie la date actuelle à l'un des moments de l'exécution de la requête. -Équivalent à ‘toDate(now())’. - -## yesterday {#yesterday} - -Accepte zéro argument et renvoie la date d'hier à l'un des moments de l'exécution de la requête. -Équivalent à ‘today() - 1’. - -## timeSlot {#timeslot} - -Arrondit le temps à la demi-heure. -Cette fonction est spécifique à Yandex.Metrica, car une demi-heure est le temps minimum pour diviser une session en deux sessions si une balise de suivi affiche les pages vues consécutives d'un seul utilisateur qui diffèrent dans le temps de strictement plus que cette durée. Cela signifie que les tuples (ID de balise, ID utilisateur et intervalle de temps) peuvent être utilisés pour rechercher les pages vues incluses dans la session correspondante. - -## toYYYYMM {#toyyyymm} - -Convertit une date ou une date avec l'heure en un numéro UInt32 contenant le numéro d'année et de mois (AAAA \* 100 + MM). - -## toYYYYMMDD {#toyyyymmdd} - -Convertit une date ou une date avec l'heure en un numéro UInt32 contenant le numéro d'année, de mois et de jour (AAAA \* 10000 + MM \* 100 + JJ). - -## toYYYYMMDDhhmmss {#toyyyymmddhhmmss} - -Convertit une date ou une date avec l'heure en un numéro UInt64 contenant le numéro d'année, de mois, de jour, d'heure, de minute et de seconde (AAAA \* 10000000000 + MM \* 100000000 + JJ \* 1000000 + hh \* 10000 + mm \* 100 + ss). - -## addYears, addMonths, addWeeks, addDays, addHours, addMinutes, addSeconds, addQuarters {#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters} - -Ajoute un intervalle de Date/DateTime à une Date/DateTime, puis renvoie la Date/DateTime. Exemple: - -``` sql -WITH - toDate('2018-01-01') AS date, - toDateTime('2018-01-01 00:00:00') AS date_time -SELECT - addYears(date, 1) AS add_years_with_date, - addYears(date_time, 1) AS add_years_with_date_time -``` - -``` text -┌─add_years_with_date─┬─add_years_with_date_time─┐ -│ 2019-01-01 │ 2019-01-01 00:00:00 │ -└─────────────────────┴──────────────────────────┘ -``` - -## subtractYears, subtractMonths, subtractWeeks, subtractDays, subtractHours, subtractMinutes, subtractSeconds, subtractQuarters {#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters} - -Soustrait un intervalle de Date/DateTime d'une Date/DateTime, puis renvoie la Date/DateTime. Exemple: - -``` sql -WITH - toDate('2019-01-01') AS date, - toDateTime('2019-01-01 00:00:00') AS date_time -SELECT - subtractYears(date, 1) AS subtract_years_with_date, - subtractYears(date_time, 1) AS subtract_years_with_date_time -``` - -``` text -┌─subtract_years_with_date─┬─subtract_years_with_date_time─┐ -│ 2018-01-01 │ 2018-01-01 00:00:00 │ -└──────────────────────────┴───────────────────────────────┘ -``` - -## dateDiff {#datediff} - -Renvoie la différence entre deux valeurs Date ou DateTime.
- -**Syntaxe** - -``` sql -dateDiff('unit', startdate, enddate, [timezone]) -``` - -**Paramètre** - -- `unit` — Time unit, in which the returned value is expressed. [Chaîne](../syntax.md#syntax-string-literal). - - Supported values: - - | unit | - | ---- | - |second | - |minute | - |hour | - |day | - |week | - |month | - |quarter | - |year | - -- `startdate` — The first time value to compare. [Date](../../sql-reference/data-types/date.md) ou [DateTime](../../sql-reference/data-types/datetime.md). - -- `enddate` — The second time value to compare. [Date](../../sql-reference/data-types/date.md) ou [DateTime](../../sql-reference/data-types/datetime.md). - -- `timezone` — Optional parameter. If specified, it is applied to both `startdate` et `enddate`. Si non spécifié, fuseaux horaires de l' `startdate` et `enddate` sont utilisés. Si elles ne sont pas identiques, le résultat n'est pas spécifié. - -**Valeur renvoyée** - -Différence entre `startdate` et `enddate` exprimé en `unit`. - -Type: `int`. - -**Exemple** - -Requête: - -``` sql -SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00')); -``` - -Résultat: - -``` text -┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐ -│ 25 │ -└────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -## intervalle de temps (StartTime, Duration, \[, Size\]) {#timeslotsstarttime-duration-size} - -Pour un intervalle de temps commençant à ‘StartTime’ et de poursuivre pour ‘Duration’ secondes, il renvoie un tableau de moments dans le temps, composé de points de cet intervalle arrondis vers le bas à la ‘Size’ en quelques secondes. ‘Size’ est un paramètre optionnel: une constante UInt32, définie sur 1800 par défaut. -Exemple, `timeSlots(toDateTime('2012-01-01 12:20:00'), 600) = [toDateTime('2012-01-01 12:00:00'), toDateTime('2012-01-01 12:30:00')]`. -Ceci est nécessaire pour rechercher les pages vues dans la session correspondante. - -## formatDateTime(Heure, Format \[, fuseau horaire\]) {#formatdatetime} - -Function formats a Time according given Format string. N.B.: Format is a constant expression, e.g. you can not have multiple formats for single result column. 
- -Modificateurs pris en charge pour le Format: -(“Example” colonne affiche le résultat de formatage pour le temps `2018-01-02 22:33:44`) - -| Modificateur | Description | Exemple | -|--------------|------------------------------------------------------------------------|------------| -| %C | année divisée par 100 et tronquée en entier (00-99) | 20 | -| %d | jour du mois, zero-rembourré (01-31) | 02 | -| %D | Date courte MM / JJ / AA, équivalente à %m / % d / % y | 01/02/18 | -| % e | jour du mois, rembourré dans l'espace ( 1-31) | 2 | -| %F | date courte AAAA-MM-JJ, équivalente à % Y - % m - % d | 2018-01-02 | -| %H | heure en format 24h (00-23) | 22 | -| %I | heure en format 12h (01-12) | 10 | -| %j | les jours de l'année (001-366) | 002 | -| %m | mois en nombre décimal (01-12) | 01 | -| %M | minute (00-59) | 33 | -| %et | caractère de nouvelle ligne (") | | -| %p | Désignation AM ou PM | PM | -| %R | 24 heures HH:MM temps, équivalent à %H: % M | 22:33 | -| %S | deuxième (00-59) | 44 | -| % t | horizontal-caractère de tabulation (') | | -| %T | Format d'heure ISO 8601 (HH:MM:SS), équivalent à %H: % M:%S | 22:33:44 | -| % u | ISO 8601 jour de la semaine comme numéro avec Lundi comme 1 (1-7) | 2 | -| %V | Numéro de semaine ISO 8601 (01-53) | 01 | -| %W | jour de la semaine comme un nombre décimal avec dimanche comme 0 (0-6) | 2 | -| % y | Année, deux derniers chiffres (00-99) | 18 | -| %Y | An | 2018 | -| %% | signe | % | - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/date_time_functions/) diff --git a/docs/fr/sql-reference/functions/encoding-functions.md b/docs/fr/sql-reference/functions/encoding-functions.md deleted file mode 100644 index 6c99ed4f32e..00000000000 --- a/docs/fr/sql-reference/functions/encoding-functions.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 52 -toc_title: Encodage ---- - -# L'Encodage Des Fonctions {#encoding-functions} - -## char {#char} - -Retourne la chaîne avec la longueur que le nombre d'arguments passés et chaque octet a la valeur de l'argument correspondant. Accepte plusieurs arguments de types numériques. Si la valeur de l'argument est hors de portée du type de données UInt8, elle est convertie en UInt8 avec arrondi et débordement possibles. - -**Syntaxe** - -``` sql -char(number_1, [number_2, ..., number_n]); -``` - -**Paramètre** - -- `number_1, number_2, ..., number_n` — Numerical arguments interpreted as integers. Types: [Int](../../sql-reference/data-types/int-uint.md), [Flottant](../../sql-reference/data-types/float.md). - -**Valeur renvoyée** - -- une chaîne d'octets. - -Type: `String`. - -**Exemple** - -Requête: - -``` sql -SELECT char(104.1, 101, 108.9, 108.9, 111) AS hello -``` - -Résultat: - -``` text -┌─hello─┐ -│ hello │ -└───────┘ -``` - -Vous pouvez construire une chaîne de codage arbitraire en passant les octets correspondants. Voici un exemple pour UTF-8: - -Requête: - -``` sql -SELECT char(0xD0, 0xBF, 0xD1, 0x80, 0xD0, 0xB8, 0xD0, 0xB2, 0xD0, 0xB5, 0xD1, 0x82) AS hello; -``` - -Résultat: - -``` text -┌─hello──┐ -│ привет │ -└────────┘ -``` - -Requête: - -``` sql -SELECT char(0xE4, 0xBD, 0xA0, 0xE5, 0xA5, 0xBD) AS hello; -``` - -Résultat: - -``` text -┌─hello─┐ -│ 你好 │ -└───────┘ -``` - -## Hex {#hex} - -Renvoie une chaîne contenant la représentation hexadécimale de l'argument. 
- -**Syntaxe** - -``` sql -hex(arg) -``` - -La fonction utilise des lettres majuscules `A-F` et ne pas utiliser de préfixes (comme `0x`) ou suffixes (comme `h`). - -Pour les arguments entiers, il imprime des chiffres hexadécimaux (“nibbles”) du plus significatif au moins significatif (big endian ou “human readable” ordre). Il commence par l'octet non nul le plus significatif (les octets de début zéro sont omis) mais imprime toujours les deux chiffres de chaque octet même si le chiffre de début est nul. - -Exemple: - -**Exemple** - -Requête: - -``` sql -SELECT hex(1); -``` - -Résultat: - -``` text -01 -``` - -Les valeurs de type `Date` et `DateTime` sont formatés comme des entiers correspondants (le nombre de jours depuis Epoch pour Date et la valeur de L'horodatage Unix pour DateTime). - -Pour `String` et `FixedString`, tous les octets sont simplement codés en deux nombres hexadécimaux. Zéro octets ne sont pas omis. - -Les valeurs des types virgule flottante et décimale sont codées comme leur représentation en mémoire. Comme nous soutenons l'architecture little endian, ils sont codés dans little endian. Zéro octets de début / fin ne sont pas omis. - -**Paramètre** - -- `arg` — A value to convert to hexadecimal. Types: [Chaîne](../../sql-reference/data-types/string.md), [UInt](../../sql-reference/data-types/int-uint.md), [Flottant](../../sql-reference/data-types/float.md), [Décimal](../../sql-reference/data-types/decimal.md), [Date](../../sql-reference/data-types/date.md) ou [DateTime](../../sql-reference/data-types/datetime.md). - -**Valeur renvoyée** - -- Une chaîne avec la représentation hexadécimale de l'argument. - -Type: `String`. - -**Exemple** - -Requête: - -``` sql -SELECT hex(toFloat32(number)) as hex_presentation FROM numbers(15, 2); -``` - -Résultat: - -``` text -┌─hex_presentation─┐ -│ 00007041 │ -│ 00008041 │ -└──────────────────┘ -``` - -Requête: - -``` sql -SELECT hex(toFloat64(number)) as hex_presentation FROM numbers(15, 2); -``` - -Résultat: - -``` text -┌─hex_presentation─┐ -│ 0000000000002E40 │ -│ 0000000000003040 │ -└──────────────────┘ -``` - -## unhex (str) {#unhexstr} - -Accepte une chaîne contenant un nombre quelconque de chiffres hexadécimaux, et renvoie une chaîne contenant le correspondant octets. Prend en charge les lettres majuscules et minuscules A-F. Le nombre de chiffres hexadécimaux ne doit pas être pair. S'il est impair, le dernier chiffre est interprété comme la moitié la moins significative de l'octet 00-0F. Si la chaîne d'argument contient autre chose que des chiffres hexadécimaux, un résultat défini par l'implémentation est renvoyé (une exception n'est pas levée). -Si vous voulez convertir le résultat en un nombre, vous pouvez utiliser le ‘reverse’ et ‘reinterpretAsType’ fonction. - -## UUIDStringToNum (str) {#uuidstringtonumstr} - -Accepte une chaîne contenant 36 caractères dans le format `123e4567-e89b-12d3-a456-426655440000`, et le renvoie comme un ensemble d'octets dans un FixedString (16). - -## UUIDNumToString (str) {#uuidnumtostringstr} - -Accepte une valeur FixedString (16). Renvoie une chaîne contenant 36 caractères au format texte. - -## bitmaskToList(num) {#bitmasktolistnum} - -Accepte un entier. Renvoie une chaîne contenant la liste des puissances de deux qui totalisent le nombre source lorsqu'il est additionné. Ils sont séparés par des virgules sans espaces au format texte, dans l'ordre croissant. - -## bitmaskToArray(num) {#bitmasktoarraynum} - -Accepte un entier. 
Renvoie un tableau de nombres UInt64 contenant la liste des puissances de deux qui totalisent le nombre source lorsqu'il est additionné. Les numéros dans le tableau sont dans l'ordre croissant. - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/encoding_functions/) diff --git a/docs/fr/sql-reference/functions/ext-dict-functions.md b/docs/fr/sql-reference/functions/ext-dict-functions.md deleted file mode 100644 index 1cec307747d..00000000000 --- a/docs/fr/sql-reference/functions/ext-dict-functions.md +++ /dev/null @@ -1,205 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 58 -toc_title: Travailler avec des dictionnaires externes ---- - -# Fonctions pour travailler avec des dictionnaires externes {#ext_dict_functions} - -Pour plus d'informations sur la connexion et la configuration de dictionnaires externes, voir [Dictionnaires externes](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md). - -## dictGet {#dictget} - -Récupère une valeur d'un dictionnaire externe. - -``` sql -dictGet('dict_name', 'attr_name', id_expr) -dictGetOrDefault('dict_name', 'attr_name', id_expr, default_value_expr) -``` - -**Paramètre** - -- `dict_name` — Name of the dictionary. [Chaîne littérale](../syntax.md#syntax-string-literal). -- `attr_name` — Name of the column of the dictionary. [Chaîne littérale](../syntax.md#syntax-string-literal). -- `id_expr` — Key value. [Expression](../syntax.md#syntax-expressions) de retour d'un [UInt64](../../sql-reference/data-types/int-uint.md) ou [Tuple](../../sql-reference/data-types/tuple.md)- tapez la valeur en fonction de la configuration du dictionnaire. -- `default_value_expr` — Value returned if the dictionary doesn't contain a row with the `id_expr` clé. [Expression](../syntax.md#syntax-expressions) renvoyer la valeur dans le type de données configuré pour `attr_name` attribut. - -**Valeur renvoyée** - -- Si ClickHouse analyse l'attribut avec succès dans le [l'attribut type de données](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes), les fonctions renvoient la valeur du dictionnaire de l'attribut qui correspond à `id_expr`. - -- Si il n'y a pas la clé, correspondant à `id_expr` dans le dictionnaire, puis: - - - `dictGet` returns the content of the `` element specified for the attribute in the dictionary configuration. - - `dictGetOrDefault` returns the value passed as the `default_value_expr` parameter. - -ClickHouse lève une exception si elle ne peut pas analyser la valeur de l'attribut ou si la valeur ne correspond pas au type de données d'attribut. - -**Exemple** - -Créer un fichier texte `ext-dict-text.csv` contenant les éléments suivants: - -``` text -1,1 -2,2 -``` - -La première colonne est `id` la deuxième colonne est `c1`. - -Configurer le dictionnaire externe: - -``` xml - - - ext-dict-test - - - /path-to/ext-dict-test.csv - CSV - - - - - - - - id - - - c1 - UInt32 - - - - 0 - - -``` - -Effectuer la requête: - -``` sql -SELECT - dictGetOrDefault('ext-dict-test', 'c1', number + 1, toUInt32(number * 10)) AS val, - toTypeName(val) AS type -FROM system.numbers -LIMIT 3 -``` - -``` text -┌─val─┬─type───┐ -│ 1 │ UInt32 │ -│ 2 │ UInt32 │ -│ 20 │ UInt32 │ -└─────┴────────┘ -``` - -**Voir Aussi** - -- [Dictionnaires Externes](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) - -## dictHas {#dicthas} - -Vérifie si une clé est présente dans un dictionnaire. 
- -``` sql -dictHas('dict_name', id_expr) -``` - -**Paramètre** - -- `dict_name` — Name of the dictionary. [Chaîne littérale](../syntax.md#syntax-string-literal). -- `id_expr` — Key value. [Expression](../syntax.md#syntax-expressions) de retour d'un [UInt64](../../sql-reference/data-types/int-uint.md)-le type de la valeur. - -**Valeur renvoyée** - -- 0, si il n'y a pas de clé. -- 1, si il y a une clé. - -Type: `UInt8`. - -## dictGetHierarchy {#dictgethierarchy} - -Crée un tableau contenant tous les parents d'une clé dans le [hiérarchique dictionnaire](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md). - -**Syntaxe** - -``` sql -dictGetHierarchy('dict_name', key) -``` - -**Paramètre** - -- `dict_name` — Name of the dictionary. [Chaîne littérale](../syntax.md#syntax-string-literal). -- `key` — Key value. [Expression](../syntax.md#syntax-expressions) de retour d'un [UInt64](../../sql-reference/data-types/int-uint.md)-le type de la valeur. - -**Valeur renvoyée** - -- Les Parents pour la clé. - -Type: [Tableau (UInt64)](../../sql-reference/data-types/array.md). - -## dictisine {#dictisin} - -Vérifie l'ancêtre d'une clé à travers toute la chaîne hiérarchique dans le dictionnaire. - -``` sql -dictIsIn('dict_name', child_id_expr, ancestor_id_expr) -``` - -**Paramètre** - -- `dict_name` — Name of the dictionary. [Chaîne littérale](../syntax.md#syntax-string-literal). -- `child_id_expr` — Key to be checked. [Expression](../syntax.md#syntax-expressions) de retour d'un [UInt64](../../sql-reference/data-types/int-uint.md)-le type de la valeur. -- `ancestor_id_expr` — Alleged ancestor of the `child_id_expr` clé. [Expression](../syntax.md#syntax-expressions) de retour d'un [UInt64](../../sql-reference/data-types/int-uint.md)-le type de la valeur. - -**Valeur renvoyée** - -- 0, si `child_id_expr` n'est pas un enfant de `ancestor_id_expr`. -- 1, si `child_id_expr` est un enfant de `ancestor_id_expr` ou si `child_id_expr` est un `ancestor_id_expr`. - -Type: `UInt8`. - -## D'Autres Fonctions {#ext_dict_functions-other} - -ClickHouse prend en charge des fonctions spécialisées qui convertissent les valeurs d'attribut de dictionnaire en un type de données spécifique, quelle que soit la configuration du dictionnaire. - -Fonction: - -- `dictGetInt8`, `dictGetInt16`, `dictGetInt32`, `dictGetInt64` -- `dictGetUInt8`, `dictGetUInt16`, `dictGetUInt32`, `dictGetUInt64` -- `dictGetFloat32`, `dictGetFloat64` -- `dictGetDate` -- `dictGetDateTime` -- `dictGetUUID` -- `dictGetString` - -Toutes ces fonctions ont le `OrDefault` modification. Exemple, `dictGetDateOrDefault`. - -Syntaxe: - -``` sql -dictGet[Type]('dict_name', 'attr_name', id_expr) -dictGet[Type]OrDefault('dict_name', 'attr_name', id_expr, default_value_expr) -``` - -**Paramètre** - -- `dict_name` — Name of the dictionary. [Chaîne littérale](../syntax.md#syntax-string-literal). -- `attr_name` — Name of the column of the dictionary. [Chaîne littérale](../syntax.md#syntax-string-literal). -- `id_expr` — Key value. [Expression](../syntax.md#syntax-expressions) de retour d'un [UInt64](../../sql-reference/data-types/int-uint.md)-le type de la valeur. -- `default_value_expr` — Value which is returned if the dictionary doesn't contain a row with the `id_expr` clé. [Expression](../syntax.md#syntax-expressions) renvoyer une valeur dans le type de données configuré pour `attr_name` attribut. 
- -**Valeur renvoyée** - -- Si ClickHouse analyse l'attribut avec succès dans le [l'attribut type de données](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes), les fonctions renvoient la valeur du dictionnaire de l'attribut qui correspond à `id_expr`. - -- Si il n'est pas demandé `id_expr` dans le dictionnaire,: - - - `dictGet[Type]` returns the content of the `` element specified for the attribute in the dictionary configuration. - - `dictGet[Type]OrDefault` returns the value passed as the `default_value_expr` parameter. - -ClickHouse lève une exception si elle ne peut pas analyser la valeur de l'attribut ou si la valeur ne correspond pas au type de données d'attribut. - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/ext_dict_functions/) diff --git a/docs/fr/sql-reference/functions/functions-for-nulls.md b/docs/fr/sql-reference/functions/functions-for-nulls.md deleted file mode 100644 index ef7be728ce7..00000000000 --- a/docs/fr/sql-reference/functions/functions-for-nulls.md +++ /dev/null @@ -1,312 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 63 -toc_title: Travailler avec des arguments nullables ---- - -# Fonctions pour travailler avec des agrégats nullables {#functions-for-working-with-nullable-aggregates} - -## isNull {#isnull} - -Vérifie si l'argument est [NULL](../../sql-reference/syntax.md#null-literal). - -``` sql -isNull(x) -``` - -**Paramètre** - -- `x` — A value with a non-compound data type. - -**Valeur renvoyée** - -- `1` si `x` être `NULL`. -- `0` si `x` n'est pas `NULL`. - -**Exemple** - -Table d'entrée - -``` text -┌─x─┬────y─┐ -│ 1 │ ᴺᵁᴸᴸ │ -│ 2 │ 3 │ -└───┴──────┘ -``` - -Requête - -``` sql -SELECT x FROM t_null WHERE isNull(y) -``` - -``` text -┌─x─┐ -│ 1 │ -└───┘ -``` - -## isNotNull {#isnotnull} - -Vérifie si l'argument est [NULL](../../sql-reference/syntax.md#null-literal). - -``` sql -isNotNull(x) -``` - -**Paramètre:** - -- `x` — A value with a non-compound data type. - -**Valeur renvoyée** - -- `0` si `x` être `NULL`. -- `1` si `x` n'est pas `NULL`. - -**Exemple** - -Table d'entrée - -``` text -┌─x─┬────y─┐ -│ 1 │ ᴺᵁᴸᴸ │ -│ 2 │ 3 │ -└───┴──────┘ -``` - -Requête - -``` sql -SELECT x FROM t_null WHERE isNotNull(y) -``` - -``` text -┌─x─┐ -│ 2 │ -└───┘ -``` - -## fusionner {#coalesce} - -Vérifie de gauche à droite si `NULL` les arguments ont été passés et renvoie le premier non-`NULL` argument. - -``` sql -coalesce(x,...) -``` - -**Paramètre:** - -- N'importe quel nombre de paramètres d'un type non composé. Tous les paramètres doivent être compatibles par type de données. - -**Valeurs renvoyées** - -- Le premier non-`NULL` argument. -- `NULL` si tous les arguments sont `NULL`. - -**Exemple** - -Considérez une liste de contacts qui peuvent spécifier plusieurs façons de contacter un client. - -``` text -┌─name─────┬─mail─┬─phone─────┬──icq─┐ -│ client 1 │ ᴺᵁᴸᴸ │ 123-45-67 │ 123 │ -│ client 2 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ -└──────────┴──────┴───────────┴──────┘ -``` - -Le `mail` et `phone` les champs sont de type Chaîne de caractères, mais la `icq` le terrain est `UInt32`, de sorte qu'il doit être converti en `String`. 
- -Obtenir la première méthode de contact pour le client à partir de la liste de contacts: - -``` sql -SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook -``` - -``` text -┌─name─────┬─coalesce(mail, phone, CAST(icq, 'Nullable(String)'))─┐ -│ client 1 │ 123-45-67 │ -│ client 2 │ ᴺᵁᴸᴸ │ -└──────────┴──────────────────────────────────────────────────────┘ -``` - -## ifNull {#ifnull} - -Renvoie une valeur alternative si l'argument principal est `NULL`. - -``` sql -ifNull(x,alt) -``` - -**Paramètre:** - -- `x` — The value to check for `NULL`. -- `alt` — The value that the function returns if `x` être `NULL`. - -**Valeurs renvoyées** - -- Valeur `x`, si `x` n'est pas `NULL`. -- Valeur `alt`, si `x` être `NULL`. - -**Exemple** - -``` sql -SELECT ifNull('a', 'b') -``` - -``` text -┌─ifNull('a', 'b')─┐ -│ a │ -└──────────────────┘ -``` - -``` sql -SELECT ifNull(NULL, 'b') -``` - -``` text -┌─ifNull(NULL, 'b')─┐ -│ b │ -└───────────────────┘ -``` - -## nullIf {#nullif} - -Retourner `NULL` si les arguments sont égaux. - -``` sql -nullIf(x, y) -``` - -**Paramètre:** - -`x`, `y` — Values for comparison. They must be compatible types, or ClickHouse will generate an exception. - -**Valeurs renvoyées** - -- `NULL` si les arguments sont égaux. -- Le `x` valeur, si les arguments ne sont pas égaux. - -**Exemple** - -``` sql -SELECT nullIf(1, 1) -``` - -``` text -┌─nullIf(1, 1)─┐ -│ ᴺᵁᴸᴸ │ -└──────────────┘ -``` - -``` sql -SELECT nullIf(1, 2) -``` - -``` text -┌─nullIf(1, 2)─┐ -│ 1 │ -└──────────────┘ -``` - -## assumeNotNull {#assumenotnull} - -Résultats dans une valeur de type [Nullable](../../sql-reference/data-types/nullable.md) pour un non- `Nullable` si la valeur n'est pas `NULL`. - -``` sql -assumeNotNull(x) -``` - -**Paramètre:** - -- `x` — The original value. - -**Valeurs renvoyées** - -- La valeur d'origine du non-`Nullable` type, si elle n'est pas `NULL`. -- La valeur par défaut pour le non-`Nullable` Tapez si la valeur d'origine était `NULL`. - -**Exemple** - -Envisager l' `t_null` table. - -``` sql -SHOW CREATE TABLE t_null -``` - -``` text -┌─statement─────────────────────────────────────────────────────────────────┐ -│ CREATE TABLE default.t_null ( x Int8, y Nullable(Int8)) ENGINE = TinyLog │ -└───────────────────────────────────────────────────────────────────────────┘ -``` - -``` text -┌─x─┬────y─┐ -│ 1 │ ᴺᵁᴸᴸ │ -│ 2 │ 3 │ -└───┴──────┘ -``` - -Appliquer le `assumeNotNull` la fonction de la `y` colonne. - -``` sql -SELECT assumeNotNull(y) FROM t_null -``` - -``` text -┌─assumeNotNull(y)─┐ -│ 0 │ -│ 3 │ -└──────────────────┘ -``` - -``` sql -SELECT toTypeName(assumeNotNull(y)) FROM t_null -``` - -``` text -┌─toTypeName(assumeNotNull(y))─┐ -│ Int8 │ -│ Int8 │ -└──────────────────────────────┘ -``` - -## toNullable {#tonullable} - -Convertit le type d'argument en `Nullable`. - -``` sql -toNullable(x) -``` - -**Paramètre:** - -- `x` — The value of any non-compound type. - -**Valeur renvoyée** - -- La valeur d'entrée avec un `Nullable` type. 
- -**Exemple** - -``` sql -SELECT toTypeName(10) -``` - -``` text -┌─toTypeName(10)─┐ -│ UInt8 │ -└────────────────┘ -``` - -``` sql -SELECT toTypeName(toNullable(10)) -``` - -``` text -┌─toTypeName(toNullable(10))─┐ -│ Nullable(UInt8) │ -└────────────────────────────┘ -``` - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/functions_for_nulls/) diff --git a/docs/fr/sql-reference/functions/geo.md b/docs/fr/sql-reference/functions/geo.md deleted file mode 100644 index a89f03c7216..00000000000 --- a/docs/fr/sql-reference/functions/geo.md +++ /dev/null @@ -1,510 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 62 -toc_title: "Travailler avec des coordonn\xE9es g\xE9ographiques" ---- - -# Fonctions pour travailler avec des coordonnées géographiques {#functions-for-working-with-geographical-coordinates} - -## greatCircleDistance {#greatcircledistance} - -Calculer la distance entre deux points sur la surface de la Terre en utilisant [la formule du grand cercle](https://en.wikipedia.org/wiki/Great-circle_distance). - -``` sql -greatCircleDistance(lon1Deg, lat1Deg, lon2Deg, lat2Deg) -``` - -**Les paramètres d'entrée** - -- `lon1Deg` — Longitude of the first point in degrees. Range: `[-180°, 180°]`. -- `lat1Deg` — Latitude of the first point in degrees. Range: `[-90°, 90°]`. -- `lon2Deg` — Longitude of the second point in degrees. Range: `[-180°, 180°]`. -- `lat2Deg` — Latitude of the second point in degrees. Range: `[-90°, 90°]`. - -Les valeurs positives correspondent à la latitude nord et à la longitude Est, et les valeurs négatives à la latitude Sud et à la longitude ouest. - -**Valeur renvoyée** - -La distance entre deux points sur la surface de la Terre, en mètres. - -Génère une exception lorsque les valeurs des paramètres d'entrée se situent en dehors de la plage. - -**Exemple** - -``` sql -SELECT greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673) -``` - -``` text -┌─greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673)─┐ -│ 14132374.194975413 │ -└───────────────────────────────────────────────────────────────────┘ -``` - -## pointInEllipses {#pointinellipses} - -Vérifie si le point appartient à au moins une des ellipses. -Coordonnées géométriques sont dans le système de coordonnées Cartésiennes. - -``` sql -pointInEllipses(x, y, x₀, y₀, a₀, b₀,...,xₙ, yₙ, aₙ, bₙ) -``` - -**Les paramètres d'entrée** - -- `x, y` — Coordinates of a point on the plane. -- `xᵢ, yᵢ` — Coordinates of the center of the `i`-ème points de suspension. -- `aᵢ, bᵢ` — Axes of the `i`- e ellipse en unités de coordonnées x, Y. - -Les paramètres d'entrée doivent être `2+4⋅n`, où `n` est le nombre de points de suspension. - -**Valeurs renvoyées** - -`1` si le point est à l'intérieur d'au moins l'un des ellipses; `0`si elle ne l'est pas. - -**Exemple** - -``` sql -SELECT pointInEllipses(10., 10., 10., 9.1, 1., 0.9999) -``` - -``` text -┌─pointInEllipses(10., 10., 10., 9.1, 1., 0.9999)─┐ -│ 1 │ -└─────────────────────────────────────────────────┘ -``` - -## pointtinpolygon {#pointinpolygon} - -Vérifie si le point appartient au polygone sur l'avion. - -``` sql -pointInPolygon((x, y), [(a, b), (c, d) ...], ...) -``` - -**Les valeurs d'entrée** - -- `(x, y)` — Coordinates of a point on the plane. Data type — [Tuple](../../sql-reference/data-types/tuple.md) — A tuple of two numbers. -- `[(a, b), (c, d) ...]` — Polygon vertices. Data type — [Tableau](../../sql-reference/data-types/array.md). 
Chaque sommet est représenté par une paire de coordonnées `(a, b)`. Les sommets doivent être spécifiés dans le sens horaire ou antihoraire. Le nombre minimum de sommets est 3. Le polygone doit être constante. -- La fonction prend également en charge les polygones avec des trous (découper des sections). Dans ce cas, ajoutez des polygones qui définissent les sections découpées en utilisant des arguments supplémentaires de la fonction. La fonction ne prend pas en charge les polygones non simplement connectés. - -**Valeurs renvoyées** - -`1` si le point est à l'intérieur du polygone, `0` si elle ne l'est pas. -Si le point est sur la limite du polygone, la fonction peut renvoyer 0 ou 1. - -**Exemple** - -``` sql -SELECT pointInPolygon((3., 3.), [(6, 0), (8, 4), (5, 8), (0, 2)]) AS res -``` - -``` text -┌─res─┐ -│ 1 │ -└─────┘ -``` - -## geohashEncode {#geohashencode} - -Encode la latitude et la longitude en tant que chaîne geohash, voir (http://geohash.org/, https://en.wikipedia.org/wiki/Geohash). - -``` sql -geohashEncode(longitude, latitude, [precision]) -``` - -**Les valeurs d'entrée** - -- longitude longitude partie de la coordonnée que vous souhaitez encoder. Flottant dans la gamme`[-180°, 180°]` -- latitude latitude partie de la coordonnée que vous souhaitez encoder. Flottant dans la gamme `[-90°, 90°]` -- precision-facultatif, longueur de la chaîne codée résultante, par défaut `12`. Entier dans la gamme `[1, 12]`. Toute valeur inférieure à `1` ou supérieure à `12` silencieusement converti à `12`. - -**Valeurs renvoyées** - -- alphanumérique `String` de coordonnées codées (la version modifiée de l'alphabet de codage base32 est utilisée). - -**Exemple** - -``` sql -SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res -``` - -``` text -┌─res──────────┐ -│ ezs42d000000 │ -└──────────────┘ -``` - -## geohashDecode {#geohashdecode} - -Décode toute chaîne codée geohash en longitude et latitude. - -**Les valeurs d'entrée** - -- chaîne codée-chaîne codée geohash. - -**Valeurs renvoyées** - -- (longitude, latitude) - 2-n-uplet de `Float64` les valeurs de longitude et de latitude. - -**Exemple** - -``` sql -SELECT geohashDecode('ezs42') AS res -``` - -``` text -┌─res─────────────────────────────┐ -│ (-5.60302734375,42.60498046875) │ -└─────────────────────────────────┘ -``` - -## geoToH3 {#geotoh3} - -Retourner [H3](https://uber.github.io/h3/#/documentation/overview/introduction) point d'indice `(lon, lat)` avec une résolution spécifiée. - -[H3](https://uber.github.io/h3/#/documentation/overview/introduction) est un système d'indexation géographique où la surface de la Terre divisée en carreaux hexagonaux même. Ce système est hiérarchique, c'est-à-dire que chaque hexagone au niveau supérieur peut être divisé en sept, même mais plus petits, etc. - -Cet indice est principalement utilisé pour les emplacements de bucketing et d'autres manipulations géospatiales. - -**Syntaxe** - -``` sql -geoToH3(lon, lat, resolution) -``` - -**Paramètre** - -- `lon` — Longitude. Type: [Float64](../../sql-reference/data-types/float.md). -- `lat` — Latitude. Type: [Float64](../../sql-reference/data-types/float.md). -- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../sql-reference/data-types/int-uint.md). - -**Valeurs renvoyées** - -- Numéro d'indice hexagonal. -- 0 en cas d'erreur. - -Type: `UInt64`. 
- -**Exemple** - -Requête: - -``` sql -SELECT geoToH3(37.79506683, 55.71290588, 15) as h3Index -``` - -Résultat: - -``` text -┌────────────h3Index─┐ -│ 644325524701193974 │ -└────────────────────┘ -``` - -## geohashesInBox {#geohashesinbox} - -Renvoie un tableau de chaînes codées geohash de précision donnée qui tombent à l'intérieur et croisent les limites d'une boîte donnée, essentiellement une grille 2D aplatie en tableau. - -**Les valeurs d'entrée** - -- longitude_min-longitude min, valeur flottante dans la plage `[-180°, 180°]` -- latitude_min-latitude min, valeur flottante dans la plage `[-90°, 90°]` -- longitude_max-longitude maximale, valeur flottante dans la plage `[-180°, 180°]` -- latitude_max-latitude maximale, valeur flottante dans la plage `[-90°, 90°]` -- précision - geohash précision, `UInt8` dans la gamme `[1, 12]` - -Veuillez noter que tous les paramètres de coordonnées doit être du même type: soit `Float32` ou `Float64`. - -**Valeurs renvoyées** - -- gamme de précision de longues chaînes de geohash-boîtes couvrant la zone, vous ne devriez pas compter sur l'ordre des éléments. -- \[\] - tableau vide si *min* les valeurs de *latitude* et *longitude* ne sont pas moins de correspondant *Max* valeur. - -Veuillez noter que la fonction lancera une exception si le tableau résultant a plus de 10'000'000 éléments. - -**Exemple** - -``` sql -SELECT geohashesInBox(24.48, 40.56, 24.785, 40.81, 4) AS thasos -``` - -``` text -┌─thasos──────────────────────────────────────┐ -│ ['sx1q','sx1r','sx32','sx1w','sx1x','sx38'] │ -└─────────────────────────────────────────────┘ -``` - -## h3GetBaseCell {#h3getbasecell} - -Renvoie le numéro de cellule de base de l'index. - -**Syntaxe** - -``` sql -h3GetBaseCell(index) -``` - -**Paramètre** - -- `index` — Hexagon index number. Type: [UInt64](../../sql-reference/data-types/int-uint.md). - -**Valeurs renvoyées** - -- Numéro de cellule de base hexagonale. Type: [UInt8](../../sql-reference/data-types/int-uint.md). - -**Exemple** - -Requête: - -``` sql -SELECT h3GetBaseCell(612916788725809151) as basecell -``` - -Résultat: - -``` text -┌─basecell─┐ -│ 12 │ -└──────────┘ -``` - -## h3HexAreaM2 {#h3hexaream2} - -Surface hexagonale Moyenne en mètres carrés à la résolution donnée. - -**Syntaxe** - -``` sql -h3HexAreaM2(resolution) -``` - -**Paramètre** - -- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../sql-reference/data-types/int-uint.md). - -**Valeurs renvoyées** - -- Area in m². Type: [Float64](../../sql-reference/data-types/float.md). - -**Exemple** - -Requête: - -``` sql -SELECT h3HexAreaM2(13) as area -``` - -Résultat: - -``` text -┌─area─┐ -│ 43.9 │ -└──────┘ -``` - -## h3IndexesAreNeighbors {#h3indexesareneighbors} - -Renvoie si les H3Indexes fournis sont voisins ou non. - -**Syntaxe** - -``` sql -h3IndexesAreNeighbors(index1, index2) -``` - -**Paramètre** - -- `index1` — Hexagon index number. Type: [UInt64](../../sql-reference/data-types/int-uint.md). -- `index2` — Hexagon index number. Type: [UInt64](../../sql-reference/data-types/int-uint.md). - -**Valeurs renvoyées** - -- Retourner `1` si les index sont voisins, `0` autrement. Type: [UInt8](../../sql-reference/data-types/int-uint.md). - -**Exemple** - -Requête: - -``` sql -SELECT h3IndexesAreNeighbors(617420388351344639, 617420388352655359) AS n -``` - -Résultat: - -``` text -┌─n─┐ -│ 1 │ -└───┘ -``` - -## h3enfants {#h3tochildren} - -Retourne un tableau avec les index enfants de l'index donné. 
- -**Syntaxe** - -``` sql -h3ToChildren(index, resolution) -``` - -**Paramètre** - -- `index` — Hexagon index number. Type: [UInt64](../../sql-reference/data-types/int-uint.md). -- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../sql-reference/data-types/int-uint.md). - -**Valeurs renvoyées** - -- Tableau avec les index H3 enfants. Tableau de type: [UInt64](../../sql-reference/data-types/int-uint.md). - -**Exemple** - -Requête: - -``` sql -SELECT h3ToChildren(599405990164561919, 6) AS children -``` - -Résultat: - -``` text -┌─children───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ -│ [603909588852408319,603909588986626047,603909589120843775,603909589255061503,603909589389279231,603909589523496959,603909589657714687] │ -└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -## h3ToParent {#h3toparent} - -Renvoie l'index parent (plus grossier) contenant l'index donné. - -**Syntaxe** - -``` sql -h3ToParent(index, resolution) -``` - -**Paramètre** - -- `index` — Hexagon index number. Type: [UInt64](../../sql-reference/data-types/int-uint.md). -- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../sql-reference/data-types/int-uint.md). - -**Valeurs renvoyées** - -- Parent H3 index. Type: [UInt64](../../sql-reference/data-types/int-uint.md). - -**Exemple** - -Requête: - -``` sql -SELECT h3ToParent(599405990164561919, 3) as parent -``` - -Résultat: - -``` text -┌─────────────parent─┐ -│ 590398848891879423 │ -└────────────────────┘ -``` - -## h3ToString {#h3tostring} - -Convertit la représentation H3Index de l'index en représentation de chaîne. - -``` sql -h3ToString(index) -``` - -**Paramètre** - -- `index` — Hexagon index number. Type: [UInt64](../../sql-reference/data-types/int-uint.md). - -**Valeurs renvoyées** - -- Représentation en chaîne de l'index H3. Type: [Chaîne](../../sql-reference/data-types/string.md). - -**Exemple** - -Requête: - -``` sql -SELECT h3ToString(617420388352917503) as h3_string -``` - -Résultat: - -``` text -┌─h3_string───────┐ -│ 89184926cdbffff │ -└─────────────────┘ -``` - -## stringToH3 {#stringtoh3} - -Convertit la représentation de chaîne en représentation H3Index (UInt64). - -``` sql -stringToH3(index_str) -``` - -**Paramètre** - -- `index_str` — String representation of the H3 index. Type: [Chaîne](../../sql-reference/data-types/string.md). - -**Valeurs renvoyées** - -- Numéro d'indice hexagonal. Renvoie 0 en cas d'erreur. Type: [UInt64](../../sql-reference/data-types/int-uint.md). - -**Exemple** - -Requête: - -``` sql -SELECT stringToH3('89184926cc3ffff') as index -``` - -Résultat: - -``` text -┌──────────────index─┐ -│ 617420388351344639 │ -└────────────────────┘ -``` - -## h3grésolution {#h3getresolution} - -Retourne la résolution de l'index. - -**Syntaxe** - -``` sql -h3GetResolution(index) -``` - -**Paramètre** - -- `index` — Hexagon index number. Type: [UInt64](../../sql-reference/data-types/int-uint.md). - -**Valeurs renvoyées** - -- L'indice de la résolution. Gamme: `[0, 15]`. Type: [UInt8](../../sql-reference/data-types/int-uint.md). 
- -**Exemple** - -Requête: - -``` sql -SELECT h3GetResolution(617420388352917503) as res -``` - -Résultat: - -``` text -┌─res─┐ -│ 9 │ -└─────┘ -``` - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/geo/) diff --git a/docs/fr/sql-reference/functions/hash-functions.md b/docs/fr/sql-reference/functions/hash-functions.md deleted file mode 100644 index 3b0f92dd4f8..00000000000 --- a/docs/fr/sql-reference/functions/hash-functions.md +++ /dev/null @@ -1,484 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 50 -toc_title: Hachage ---- - -# Les Fonctions De Hachage {#hash-functions} - -Les fonctions de hachage peuvent être utilisées pour le brassage pseudo-aléatoire déterministe des éléments. - -## halfMD5 {#hash-functions-halfmd5} - -[Interpréter](../../sql-reference/functions/type-conversion-functions.md#type_conversion_functions-reinterpretAsString) tous les paramètres d'entrée sous forme de chaînes et calcule le [MD5](https://en.wikipedia.org/wiki/MD5) la valeur de hachage pour chacun d'eux. Puis combine les hachages, prend les 8 premiers octets du hachage de la chaîne résultante, et les interprète comme `UInt64` dans l'ordre des octets big-endian. - -``` sql -halfMD5(par1, ...) -``` - -La fonction est relativement lente (5 millions de chaînes courtes par seconde par cœur de processeur). -Envisager l'utilisation de la [sipHash64](#hash_functions-siphash64) la fonction la place. - -**Paramètre** - -La fonction prend un nombre variable de paramètres d'entrée. Les paramètres peuvent être tout de la [types de données pris en charge](../../sql-reference/data-types/index.md). - -**Valeur Renvoyée** - -A [UInt64](../../sql-reference/data-types/int-uint.md) valeur de hachage du type de données. - -**Exemple** - -``` sql -SELECT halfMD5(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS halfMD5hash, toTypeName(halfMD5hash) AS type -``` - -``` text -┌────────halfMD5hash─┬─type───┐ -│ 186182704141653334 │ UInt64 │ -└────────────────────┴────────┘ -``` - -## MD5 {#hash_functions-md5} - -Calcule le MD5 à partir d'une chaîne et renvoie L'ensemble d'octets résultant en tant que FixedString(16). -Si vous n'avez pas besoin de MD5 en particulier, mais que vous avez besoin d'un hachage cryptographique 128 bits décent, utilisez le ‘sipHash128’ la fonction la place. -Si vous voulez obtenir le même résultat que la sortie de l'utilitaire md5sum, utilisez lower (hex(MD5 (s))). - -## sipHash64 {#hash_functions-siphash64} - -Produit un 64 bits [SipHash](https://131002.net/siphash/) la valeur de hachage. - -``` sql -sipHash64(par1,...) -``` - -C'est une fonction de hachage cryptographique. Il fonctionne au moins trois fois plus vite que le [MD5](#hash_functions-md5) fonction. - -Fonction [interpréter](../../sql-reference/functions/type-conversion-functions.md#type_conversion_functions-reinterpretAsString) tous les paramètres d'entrée sous forme de chaînes et calcule la valeur de hachage pour chacun d'eux. Puis combine les hachages par l'algorithme suivant: - -1. Après avoir haché tous les paramètres d'entrée, la fonction obtient le tableau de hachages. -2. La fonction prend le premier et le second éléments et calcule un hachage pour le tableau d'entre eux. -3. Ensuite, la fonction prend la valeur de hachage, calculée à l'étape précédente, et le troisième élément du tableau de hachage initial, et calcule un hachage pour le tableau d'entre eux. -4. 
L'étape précédente est répétée pour tous les éléments restants de la période initiale de hachage tableau. - -**Paramètre** - -La fonction prend un nombre variable de paramètres d'entrée. Les paramètres peuvent être tout de la [types de données pris en charge](../../sql-reference/data-types/index.md). - -**Valeur Renvoyée** - -A [UInt64](../../sql-reference/data-types/int-uint.md) valeur de hachage du type de données. - -**Exemple** - -``` sql -SELECT sipHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS SipHash, toTypeName(SipHash) AS type -``` - -``` text -┌──────────────SipHash─┬─type───┐ -│ 13726873534472839665 │ UInt64 │ -└──────────────────────┴────────┘ -``` - -## sipHash128 {#hash_functions-siphash128} - -Calcule SipHash à partir d'une chaîne. -Accepte un argument de type chaîne. Renvoie FixedString (16). -Diffère de sipHash64 en ce que l'état de pliage xor final n'est effectué que jusqu'à 128 bits. - -## cityHash64 {#cityhash64} - -Produit un 64 bits [CityHash](https://github.com/google/cityhash) la valeur de hachage. - -``` sql -cityHash64(par1,...) -``` - -Ceci est une fonction de hachage non cryptographique rapide. Il utilise L'algorithme CityHash pour les paramètres de chaîne et la fonction de hachage rapide non cryptographique spécifique à l'implémentation pour les paramètres avec d'autres types de données. La fonction utilise le combinateur CityHash pour obtenir les résultats finaux. - -**Paramètre** - -La fonction prend un nombre variable de paramètres d'entrée. Les paramètres peuvent être tout de la [types de données pris en charge](../../sql-reference/data-types/index.md). - -**Valeur Renvoyée** - -A [UInt64](../../sql-reference/data-types/int-uint.md) valeur de hachage du type de données. - -**Exemple** - -Appelez exemple: - -``` sql -SELECT cityHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS CityHash, toTypeName(CityHash) AS type -``` - -``` text -┌─────────────CityHash─┬─type───┐ -│ 12072650598913549138 │ UInt64 │ -└──────────────────────┴────────┘ -``` - -L'exemple suivant montre comment calculer la somme de l'ensemble de la table avec précision jusqu'à la ligne de commande: - -``` sql -SELECT groupBitXor(cityHash64(*)) FROM table -``` - -## intHash32 {#inthash32} - -Calcule un code de hachage 32 bits à partir de n'importe quel type d'entier. -C'est une fonction de hachage non cryptographique relativement rapide de qualité moyenne pour les nombres. - -## intHash64 {#inthash64} - -Calcule un code de hachage 64 bits à partir de n'importe quel type d'entier. -Il fonctionne plus vite que intHash32. Qualité moyenne. - -## SHA1 {#sha1} - -## SHA224 {#sha224} - -## SHA256 {#sha256} - -Calcule SHA-1, SHA-224 ou SHA-256 à partir d'une chaîne et renvoie l'ensemble d'octets résultant en tant que FixedString(20), FixedString(28) ou FixedString(32). -La fonction fonctionne assez lentement (SHA-1 traite environ 5 millions de chaînes courtes par seconde par cœur de processeur, tandis que SHA-224 et SHA-256 traitent environ 2,2 millions). -Nous vous recommandons d'utiliser cette fonction uniquement dans les cas où vous avez besoin d'une fonction de hachage spécifique et que vous ne pouvez pas la sélectionner. -Même dans ces cas, nous vous recommandons d'appliquer la fonction hors ligne et de pré-calculer les valeurs lors de leur insertion dans la table, au lieu de l'appliquer dans SELECTS. 
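À titre d'illustration du conseil ci-dessus, voici une esquisse minimale (le nom de table `docs` et les colonnes sont hypothétiques) qui matérialise le hachage au moment de l'insertion plutôt que de le recalculer dans chaque SELECT :

``` sql
CREATE TABLE docs
(
    id UInt64,
    body String,
    -- Calculé une seule fois, à l'insertion de la ligne
    body_sha256 FixedString(32) MATERIALIZED SHA256(body)
)
ENGINE = MergeTree
ORDER BY id;

INSERT INTO docs (id, body) VALUES (1, 'hello');

-- Lit le hachage précalculé ; SHA256 n'est pas réévalué à chaque SELECT
SELECT id, hex(body_sha256) FROM docs;
```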
- -## URLHash(url \[, N\]) {#urlhashurl-n} - -Une fonction de hachage non cryptographique rapide et de qualité décente pour une chaîne obtenue à partir d'une URL en utilisant un type de normalisation. -`URLHash(s)` – Calculates a hash from a string without one of the trailing symbols `/`,`?` ou `#` à la fin, si elle est présente. -`URLHash(s, N)` – Calculates a hash from a string up to the N level in the URL hierarchy, without one of the trailing symbols `/`,`?` ou `#` à la fin, si elle est présente. -Les niveaux sont les mêmes que dans URLHierarchy. Cette fonction est spécifique à Yandex.Metrica. - -## farmHash64 {#farmhash64} - -Produit un 64 bits [FarmHash](https://github.com/google/farmhash) la valeur de hachage. - -``` sql -farmHash64(par1, ...) -``` - -La fonction utilise le `Hash64` la méthode de tous les [les méthodes disponibles](https://github.com/google/farmhash/blob/master/src/farmhash.h). - -**Paramètre** - -La fonction prend un nombre variable de paramètres d'entrée. Les paramètres peuvent être tout de la [types de données pris en charge](../../sql-reference/data-types/index.md). - -**Valeur Renvoyée** - -A [UInt64](../../sql-reference/data-types/int-uint.md) valeur de hachage du type de données. - -**Exemple** - -``` sql -SELECT farmHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS FarmHash, toTypeName(FarmHash) AS type -``` - -``` text -┌─────────────FarmHash─┬─type───┐ -│ 17790458267262532859 │ UInt64 │ -└──────────────────────┴────────┘ -``` - -## javaHash {#hash_functions-javahash} - -Calculer [JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452) à partir d'une chaîne. Cette fonction de hachage n'est ni rapide ni de bonne qualité. La seule raison de l'utiliser est lorsque cet algorithme est déjà utilisé dans un autre système et que vous devez calculer exactement le même résultat. - -**Syntaxe** - -``` sql -SELECT javaHash(''); -``` - -**Valeur renvoyée** - -A `Int32` valeur de hachage du type de données. - -**Exemple** - -Requête: - -``` sql -SELECT javaHash('Hello, world!'); -``` - -Résultat: - -``` text -┌─javaHash('Hello, world!')─┐ -│ -1880044555 │ -└───────────────────────────┘ -``` - -## javaHashUTF16LE {#javahashutf16le} - -Calculer [JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452) à partir d'une chaîne, en supposant qu'elle contient des octets représentant une chaîne en encodage UTF-16LE. - -**Syntaxe** - -``` sql -javaHashUTF16LE(stringUtf16le) -``` - -**Paramètre** - -- `stringUtf16le` — a string in UTF-16LE encoding. - -**Valeur renvoyée** - -A `Int32` valeur de hachage du type de données. - -**Exemple** - -Requête correcte avec une chaîne codée UTF-16LE. - -Requête: - -``` sql -SELECT javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le')) -``` - -Résultat: - -``` text -┌─javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le'))─┐ -│ 3556498 │ -└──────────────────────────────────────────────────────────────┘ -``` - -## hiveHash {#hash-functions-hivehash} - -Calculer `HiveHash` à partir d'une chaîne. - -``` sql -SELECT hiveHash(''); -``` - -C'est juste [JavaHash](#hash_functions-javahash) avec le bit de signe mis à zéro. Cette fonction est utilisée dans [Apache Hive](https://en.wikipedia.org/wiki/Apache_Hive) pour les versions antérieures à la version 3.0. Cette fonction de hachage n'est ni rapide ni de bonne qualité. 
La seule raison de l'utiliser est lorsque cet algorithme est déjà utilisé dans un autre système et que vous devez calculer exactement le même résultat. - -**Valeur renvoyée** - -A `Int32` valeur de hachage du type de données. - -Type: `hiveHash`. - -**Exemple** - -Requête: - -``` sql -SELECT hiveHash('Hello, world!'); -``` - -Résultat: - -``` text -┌─hiveHash('Hello, world!')─┐ -│ 267439093 │ -└───────────────────────────┘ -``` - -## metroHash64 {#metrohash64} - -Produit un 64 bits [MetroHash](http://www.jandrewrogers.com/2015/05/27/metrohash/) la valeur de hachage. - -``` sql -metroHash64(par1, ...) -``` - -**Paramètre** - -La fonction prend un nombre variable de paramètres d'entrée. Les paramètres peuvent être tout de la [types de données pris en charge](../../sql-reference/data-types/index.md). - -**Valeur Renvoyée** - -A [UInt64](../../sql-reference/data-types/int-uint.md) valeur de hachage du type de données. - -**Exemple** - -``` sql -SELECT metroHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MetroHash, toTypeName(MetroHash) AS type -``` - -``` text -┌────────────MetroHash─┬─type───┐ -│ 14235658766382344533 │ UInt64 │ -└──────────────────────┴────────┘ -``` - -## jumpConsistentHash {#jumpconsistenthash} - -Calcule JumpConsistentHash forme un UInt64. -Accepte deux arguments: une clé de type UInt64 et le nombre de compartiments. Renvoie Int32. -Pour plus d'informations, voir le lien: [JumpConsistentHash](https://arxiv.org/pdf/1406.2294.pdf) - -## murmurHash2_32, murmurHash2_64 {#murmurhash2-32-murmurhash2-64} - -Produit un [MurmurHash2](https://github.com/aappleby/smhasher) la valeur de hachage. - -``` sql -murmurHash2_32(par1, ...) -murmurHash2_64(par1, ...) -``` - -**Paramètre** - -Les deux fonctions prennent un nombre variable de paramètres d'entrée. Les paramètres peuvent être tout de la [types de données pris en charge](../../sql-reference/data-types/index.md). - -**Valeur Renvoyée** - -- Le `murmurHash2_32` fonction renvoie la valeur de hachage ayant le [UInt32](../../sql-reference/data-types/int-uint.md) type de données. -- Le `murmurHash2_64` fonction renvoie la valeur de hachage ayant le [UInt64](../../sql-reference/data-types/int-uint.md) type de données. - -**Exemple** - -``` sql -SELECT murmurHash2_64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash2, toTypeName(MurmurHash2) AS type -``` - -``` text -┌──────────MurmurHash2─┬─type───┐ -│ 11832096901709403633 │ UInt64 │ -└──────────────────────┴────────┘ -``` - -## gccMurmurHash {#gccmurmurhash} - -Calcule un 64 bits [MurmurHash2](https://github.com/aappleby/smhasher) valeur de hachage utilisant la même graine de hachage que [gcc](https://github.com/gcc-mirror/gcc/blob/41d6b10e96a1de98e90a7c0378437c3255814b16/libstdc%2B%2B-v3/include/bits/functional_hash.h#L191). Il est portable entre Clang et GCC construit. - -**Syntaxe** - -``` sql -gccMurmurHash(par1, ...); -``` - -**Paramètre** - -- `par1, ...` — A variable number of parameters that can be any of the [types de données pris en charge](../../sql-reference/data-types/index.md#data_types). - -**Valeur renvoyée** - -- Valeur de hachage calculée. - -Type: [UInt64](../../sql-reference/data-types/int-uint.md). 
- -**Exemple** - -Requête: - -``` sql -SELECT - gccMurmurHash(1, 2, 3) AS res1, - gccMurmurHash(('a', [1, 2, 3], 4, (4, ['foo', 'bar'], 1, (1, 2)))) AS res2 -``` - -Résultat: - -``` text -┌─────────────────res1─┬────────────────res2─┐ -│ 12384823029245979431 │ 1188926775431157506 │ -└──────────────────────┴─────────────────────┘ -``` - -## murmurHash3_32, murmurHash3_64 {#murmurhash3-32-murmurhash3-64} - -Produit un [MurmurHash3](https://github.com/aappleby/smhasher) la valeur de hachage. - -``` sql -murmurHash3_32(par1, ...) -murmurHash3_64(par1, ...) -``` - -**Paramètre** - -Les deux fonctions prennent un nombre variable de paramètres d'entrée. Les paramètres peuvent être tout de la [types de données pris en charge](../../sql-reference/data-types/index.md). - -**Valeur Renvoyée** - -- Le `murmurHash3_32` la fonction retourne un [UInt32](../../sql-reference/data-types/int-uint.md) valeur de hachage du type de données. -- Le `murmurHash3_64` la fonction retourne un [UInt64](../../sql-reference/data-types/int-uint.md) valeur de hachage du type de données. - -**Exemple** - -``` sql -SELECT murmurHash3_32(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash3, toTypeName(MurmurHash3) AS type -``` - -``` text -┌─MurmurHash3─┬─type───┐ -│ 2152717 │ UInt32 │ -└─────────────┴────────┘ -``` - -## murmurHash3_128 {#murmurhash3-128} - -Produit de 128 bits [MurmurHash3](https://github.com/aappleby/smhasher) la valeur de hachage. - -``` sql -murmurHash3_128( expr ) -``` - -**Paramètre** - -- `expr` — [Expression](../syntax.md#syntax-expressions) de retour d'un [Chaîne](../../sql-reference/data-types/string.md)-le type de la valeur. - -**Valeur Renvoyée** - -A [FixedString (16)](../../sql-reference/data-types/fixedstring.md) valeur de hachage du type de données. - -**Exemple** - -``` sql -SELECT murmurHash3_128('example_string') AS MurmurHash3, toTypeName(MurmurHash3) AS type -``` - -``` text -┌─MurmurHash3──────┬─type────────────┐ -│ 6�1�4"S5KT�~~q │ FixedString(16) │ -└──────────────────┴─────────────────┘ -``` - -## xxHash32, xxHash64 {#hash-functions-xxhash32} - -Calculer `xxHash` à partir d'une chaîne. Il est proposé en deux saveurs, 32 et 64 bits. - -``` sql -SELECT xxHash32(''); - -OR - -SELECT xxHash64(''); -``` - -**Valeur renvoyée** - -A `Uint32` ou `Uint64` valeur de hachage du type de données. - -Type: `xxHash`. - -**Exemple** - -Requête: - -``` sql -SELECT xxHash32('Hello, world!'); -``` - -Résultat: - -``` text -┌─xxHash32('Hello, world!')─┐ -│ 834093149 │ -└───────────────────────────┘ -``` - -**Voir Aussi** - -- [xxHash](http://cyan4973.github.io/xxHash/). - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/hash_functions/) diff --git a/docs/fr/sql-reference/functions/higher-order-functions.md b/docs/fr/sql-reference/functions/higher-order-functions.md deleted file mode 100644 index ac24b67bb97..00000000000 --- a/docs/fr/sql-reference/functions/higher-order-functions.md +++ /dev/null @@ -1,264 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 57 -toc_title: "D'Ordre Sup\xE9rieur" ---- - -# Fonctions d'ordre supérieur {#higher-order-functions} - -## `->` opérateur, fonction lambda (params, expr) {#operator-lambdaparams-expr-function} - -Allows describing a lambda function for passing to a higher-order function. The left side of the arrow has a formal parameter, which is any ID, or multiple formal parameters – any IDs in a tuple. 
The right side of the arrow has an expression that can use these formal parameters, as well as any table columns. - -Exemple: `x -> 2 * x, str -> str != Referer.` - -Les fonctions d'ordre supérieur ne peuvent accepter que les fonctions lambda comme argument fonctionnel. - -Une fonction lambda qui accepte plusieurs arguments peuvent être passés à une fonction d'ordre supérieur. Dans ce cas, la fonction d'ordre supérieur est passé plusieurs tableaux de longueur identique que ces arguments correspondent. - -Pour certaines fonctions, telles que [arrayCount](#higher_order_functions-array-count) ou [arraySum](#higher_order_functions-array-count) le premier argument (la fonction lambda) peut être omis. Dans ce cas, un mappage identique est supposé. - -Une fonction lambda ne peut pas être omise pour les fonctions suivantes: - -- [arrayMap](#higher_order_functions-array-map) -- [arrayFilter](#higher_order_functions-array-filter) -- [arrayFill](#higher_order_functions-array-fill) -- [arrayReverseFill](#higher_order_functions-array-reverse-fill) -- [arraySplit](#higher_order_functions-array-split) -- [arrayReverseSplit](#higher_order_functions-array-reverse-split) -- [arrayFirst](#higher_order_functions-array-first) -- [arrayFirstIndex](#higher_order_functions-array-first-index) - -### arrayMap(func, arr1, …) {#higher_order_functions-array-map} - -Renvoie un tableau obtenu à partir de l'application d'origine `func` fonction à chaque élément dans le `arr` tableau. - -Exemple: - -``` sql -SELECT arrayMap(x -> (x + 2), [1, 2, 3]) as res; -``` - -``` text -┌─res─────┐ -│ [3,4,5] │ -└─────────┘ -``` - -L'exemple suivant montre comment créer un n-uplet d'éléments de différents tableaux: - -``` sql -SELECT arrayMap((x, y) -> (x, y), [1, 2, 3], [4, 5, 6]) AS res -``` - -``` text -┌─res─────────────────┐ -│ [(1,4),(2,5),(3,6)] │ -└─────────────────────┘ -``` - -Notez que le premier argument (fonction lambda) ne peut pas être omis dans le `arrayMap` fonction. - -### arrayFilter(func, arr1, …) {#higher_order_functions-array-filter} - -Renvoie un tableau contenant uniquement les éléments `arr1` pour ce qui `func` retourne autre chose que 0. - -Exemple: - -``` sql -SELECT arrayFilter(x -> x LIKE '%World%', ['Hello', 'abc World']) AS res -``` - -``` text -┌─res───────────┐ -│ ['abc World'] │ -└───────────────┘ -``` - -``` sql -SELECT - arrayFilter( - (i, x) -> x LIKE '%World%', - arrayEnumerate(arr), - ['Hello', 'abc World'] AS arr) - AS res -``` - -``` text -┌─res─┐ -│ [2] │ -└─────┘ -``` - -Notez que le premier argument (fonction lambda) ne peut pas être omis dans le `arrayFilter` fonction. - -### arrayFill(func, arr1, …) {#higher_order_functions-array-fill} - -Analyse par le biais de `arr1` du premier élément au dernier élément et remplacer `arr1[i]` par `arr1[i - 1]` si `func` renvoie 0. Le premier élément de `arr1` ne sera pas remplacé. - -Exemple: - -``` sql -SELECT arrayFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5, 6, 14, null, null]) AS res -``` - -``` text -┌─res──────────────────────────────┐ -│ [1,1,3,11,12,12,12,5,6,14,14,14] │ -└──────────────────────────────────┘ -``` - -Notez que le premier argument (fonction lambda) ne peut pas être omis dans le `arrayFill` fonction. - -### arrayReverseFill(func, arr1, …) {#higher_order_functions-array-reverse-fill} - -Analyse par le biais de `arr1` du dernier élément au premier élément et remplacer `arr1[i]` par `arr1[i + 1]` si `func` renvoie 0. Le dernier élément de `arr1` ne sera pas remplacé. 
- -Exemple: - -``` sql -SELECT arrayReverseFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5, 6, 14, null, null]) AS res -``` - -``` text -┌─res────────────────────────────────┐ -│ [1,3,3,11,12,5,5,5,6,14,NULL,NULL] │ -└────────────────────────────────────┘ -``` - -Notez que le premier argument (fonction lambda) ne peut pas être omis dans le `arrayReverseFill` fonction. - -### arraySplit(func, arr1, …) {#higher_order_functions-array-split} - -Split `arr1` en plusieurs tableaux. Lorsque `func` retourne autre chose que 0, la matrice sera de split sur le côté gauche de l'élément. Le tableau ne sera pas partagé avant le premier élément. - -Exemple: - -``` sql -SELECT arraySplit((x, y) -> y, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]) AS res -``` - -``` text -┌─res─────────────┐ -│ [[1,2,3],[4,5]] │ -└─────────────────┘ -``` - -Notez que le premier argument (fonction lambda) ne peut pas être omis dans le `arraySplit` fonction. - -### arrayReverseSplit(func, arr1, …) {#higher_order_functions-array-reverse-split} - -Split `arr1` en plusieurs tableaux. Lorsque `func` retourne autre chose que 0, la matrice sera de split sur le côté droit de l'élément. Le tableau ne sera pas divisé après le dernier élément. - -Exemple: - -``` sql -SELECT arrayReverseSplit((x, y) -> y, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]) AS res -``` - -``` text -┌─res───────────────┐ -│ [[1],[2,3,4],[5]] │ -└───────────────────┘ -``` - -Notez que le premier argument (fonction lambda) ne peut pas être omis dans le `arraySplit` fonction. - -### arrayCount(\[func,\] arr1, …) {#higher_order_functions-array-count} - -Renvoie le nombre d'éléments dans l'arr tableau pour lequel func renvoie autre chose que 0. Si ‘func’ n'est pas spécifié, il renvoie le nombre d'éléments non nuls dans le tableau. - -### arrayExists(\[func,\] arr1, …) {#arrayexistsfunc-arr1} - -Renvoie 1 s'il existe au moins un élément ‘arr’ pour ce qui ‘func’ retourne autre chose que 0. Sinon, il renvoie 0. - -### arrayAll(\[func,\] arr1, …) {#arrayallfunc-arr1} - -Renvoie 1 si ‘func’ retourne autre chose que 0 pour tous les éléments de ‘arr’. Sinon, il renvoie 0. - -### arraySum(\[func,\] arr1, …) {#higher-order-functions-array-sum} - -Renvoie la somme de la ‘func’ valeur. Si la fonction est omise, elle retourne la somme des éléments du tableau. - -### arrayFirst(func, arr1, …) {#higher_order_functions-array-first} - -Renvoie le premier élément du ‘arr1’ tableau pour lequel ‘func’ retourne autre chose que 0. - -Notez que le premier argument (fonction lambda) ne peut pas être omis dans le `arrayFirst` fonction. - -### arrayFirstIndex(func, arr1, …) {#higher_order_functions-array-first-index} - -Renvoie l'index du premier élément de la ‘arr1’ tableau pour lequel ‘func’ retourne autre chose que 0. - -Notez que le premier argument (fonction lambda) ne peut pas être omis dans le `arrayFirstIndex` fonction. - -### arrayCumSum(\[func,\] arr1, …) {#arraycumsumfunc-arr1} - -Retourne un tableau des sommes partielles d'éléments dans le tableau source (une somme). Si l' `func` la fonction est spécifiée, les valeurs des éléments du tableau sont convertis par cette fonction avant l'addition. - -Exemple: - -``` sql -SELECT arrayCumSum([1, 1, 1, 1]) AS res -``` - -``` text -┌─res──────────┐ -│ [1, 2, 3, 4] │ -└──────────────┘ -``` - -### arrayCumSumNonNegative (arr) {#arraycumsumnonnegativearr} - -Même que `arrayCumSum`, renvoie un tableau des sommes partielles d'éléments dans le tableau source (une somme). 
Différent `arrayCumSum`, lorsque la valeur renvoyée contient une valeur inférieure à zéro, la valeur est remplacée par zéro et le calcul ultérieur est effectué avec des paramètres zéro. Exemple: - -``` sql -SELECT arrayCumSumNonNegative([1, 1, -4, 1]) AS res -``` - -``` text -┌─res───────┐ -│ [1,2,0,1] │ -└───────────┘ -``` - -### arraySort(\[func,\] arr1, …) {#arraysortfunc-arr1} - -Renvoie un tableau à la suite du tri des éléments de `arr1` dans l'ordre croissant. Si l' `func` la fonction est spécifiée, l'ordre de classement est déterminé par le résultat de la fonction `func` appliquée aux éléments du tableau (tableaux) - -Le [Transformation schwartzienne](https://en.wikipedia.org/wiki/Schwartzian_transform) est utilisé pour améliorer l'efficacité du tri. - -Exemple: - -``` sql -SELECT arraySort((x, y) -> y, ['hello', 'world'], [2, 1]); -``` - -``` text -┌─res────────────────┐ -│ ['world', 'hello'] │ -└────────────────────┘ -``` - -Pour plus d'informations sur la `arraySort` la méthode, voir l' [Fonctions pour travailler avec des tableaux](array-functions.md#array_functions-sort) section. - -### arrayReverseSort(\[func,\] arr1, …) {#arrayreversesortfunc-arr1} - -Renvoie un tableau à la suite du tri des éléments de `arr1` dans l'ordre décroissant. Si l' `func` la fonction est spécifiée, l'ordre de classement est déterminé par le résultat de la fonction `func` appliquée aux éléments du tableau (tableaux). - -Exemple: - -``` sql -SELECT arrayReverseSort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; -``` - -``` text -┌─res───────────────┐ -│ ['hello','world'] │ -└───────────────────┘ -``` - -Pour plus d'informations sur la `arrayReverseSort` la méthode, voir l' [Fonctions pour travailler avec des tableaux](array-functions.md#array_functions-reverse-sort) section. - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/higher_order_functions/) diff --git a/docs/fr/sql-reference/functions/in-functions.md b/docs/fr/sql-reference/functions/in-functions.md deleted file mode 100644 index ced5ef73e46..00000000000 --- a/docs/fr/sql-reference/functions/in-functions.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 60 -toc_title: "Mise en \u0153uvre de L'op\xE9rateur IN" ---- - -# Fonctions de mise en œuvre de L'opérateur IN {#functions-for-implementing-the-in-operator} - -## in, notin, globalIn, globalNotIn {#in-functions} - -Voir la section [Dans les opérateurs](../operators/in.md#select-in-operators). - -## tuple(x, y, …), operator (x, y, …) {#tuplex-y-operator-x-y} - -Une fonction qui permet de regrouper plusieurs colonnes. -For columns with the types T1, T2, …, it returns a Tuple(T1, T2, …) type tuple containing these columns. There is no cost to execute the function. -Les Tuples sont normalement utilisés comme valeurs intermédiaires pour un argument D'opérateurs IN, ou pour créer une liste de paramètres formels de fonctions lambda. Les Tuples ne peuvent pas être écrits sur une table. - -## tupleElement (tuple, n), opérateur X. N {#tupleelementtuple-n-operator-x-n} - -Une fonction qui permet d'obtenir une colonne à partir d'un tuple. -‘N’ est l'index de colonne, à partir de 1. N doit être une constante. ‘N’ doit être une constante. ‘N’ doit être un entier postif strict ne dépassant pas la taille du tuple. -Il n'y a aucun coût pour exécuter la fonction. 
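A short illustration of both functions (the values are arbitrary):

``` sql
SELECT
    tuple(1, 'a') AS t,           -- Tuple(UInt8, String)
    tupleElement(t, 2) AS second  -- returns 'a'
```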
- -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/in_functions/) diff --git a/docs/fr/sql-reference/functions/index.md b/docs/fr/sql-reference/functions/index.md deleted file mode 100644 index 6e5333f68f5..00000000000 --- a/docs/fr/sql-reference/functions/index.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_folder_title: Fonction -toc_priority: 32 -toc_title: Introduction ---- - -# Fonction {#functions} - -Il y a au moins\* deux types de fonctions - des fonctions régulières (elles sont simplement appelées “functions”) and aggregate functions. These are completely different concepts. Regular functions work as if they are applied to each row separately (for each row, the result of the function doesn't depend on the other rows). Aggregate functions accumulate a set of values from various rows (i.e. they depend on the entire set of rows). - -Dans cette section, nous discutons des fonctions classiques. Pour les fonctions d'agrégation, voir la section “Aggregate functions”. - -\* - Il existe un troisième type de fonction ‘arrayJoin’ la fonction appartient à; les fonctions de table peuvent également être mentionnées séparément.\* - -## Typage Fort {#strong-typing} - -Contrairement à SQL standard, ClickHouse a une forte typage. En d'autres termes, il ne fait pas de conversions implicites entre les types. Chaque fonction fonctionne pour un ensemble spécifique de types. Cela signifie que vous devez parfois utiliser des fonctions de conversion de type. - -## Élimination Des Sous-Expressions Courantes {#common-subexpression-elimination} - -Toutes les expressions d'une requête qui ont le même AST (le même enregistrement ou le même résultat d'analyse syntaxique) sont considérées comme ayant des valeurs identiques. De telles expressions sont concaténées et exécutées une fois. Les sous-requêtes identiques sont également éliminées de cette façon. - -## Types de résultats {#types-of-results} - -Toutes les fonctions renvoient un seul retour comme résultat (pas plusieurs valeurs, et pas des valeurs nulles). Le type de résultat est généralement défini uniquement par les types d'arguments, pas par les valeurs. Les Exceptions sont la fonction tupleElement (l'opérateur A. N) et la fonction toFixedString. - -## Constant {#constants} - -Pour simplifier, certaines fonctions ne peuvent fonctionner qu'avec des constantes pour certains arguments. Par exemple, le bon argument de L'opérateur LIKE doit être une constante. -Presque toutes les fonctions renvoient une constante pour des arguments constants. L'exception est les fonctions qui génèrent des nombres aléatoires. -Le ‘now’ function renvoie des valeurs différentes pour les requêtes qui ont été exécutées à des moments différents, mais le résultat est considéré comme une constante, car la constance n'est importante que dans une seule requête. -Une expression constante est également considérée comme une constante (par exemple, la moitié droite de L'opérateur LIKE peut être construite à partir de plusieurs constantes). - -Les fonctions peuvent être implémentées de différentes manières pour des arguments constants et non constants (un code différent est exécuté). Mais les résultats pour une constante et pour une colonne vraie Ne contenant que la même valeur doivent correspondre les uns aux autres. 
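For example, the following sketch (using the standard `numbers` table function) shows the per-query constancy of `now()`:

``` sql
-- now() is evaluated once per query, so all three rows carry the same timestamp.
SELECT number, now() FROM numbers(3);
```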
- -## Le Traitement NULL {#null-processing} - -Les fonctions ont les comportements suivants: - -- Si au moins l'un des arguments de la fonction est `NULL` le résultat de la fonction est également `NULL`. -- Comportement spécial spécifié individuellement dans la description de chaque fonction. Dans le code source de ClickHouse, ces fonctions ont `UseDefaultImplementationForNulls=false`. - -## Constance {#constancy} - -Functions can't change the values of their arguments – any changes are returned as the result. Thus, the result of calculating separate functions does not depend on the order in which the functions are written in the query. - -## Erreur De Manipulation {#error-handling} - -Certaines fonctions peuvent lancer une exception si les données ne sont pas valides. Dans ce cas, la requête est annulée et un message d'erreur est retourné au client. Pour le traitement distribué, lorsqu'une exception se produit sur l'un des serveurs, les autres serveurs aussi tenté d'interrompre la requête. - -## Évaluation des Expressions D'Argument {#evaluation-of-argument-expressions} - -Dans presque tous les langages de programmation, l'un des arguments peut pas être évalué pour certains opérateurs. Ce sont généralement les opérateurs `&&`, `||`, et `?:`. -Mais dans ClickHouse, les arguments des fonctions (opérateurs) sont toujours évalués. En effet, des parties entières de colonnes sont évaluées à la fois, au lieu de calculer chaque ligne séparément. - -## Exécution de fonctions pour le traitement de requêtes distribuées {#performing-functions-for-distributed-query-processing} - -Pour le traitement de requête distribué, autant d'étapes de traitement de requête que possible sont effectuées sur des serveurs distants, et le reste des étapes (fusion des résultats intermédiaires et tout ce qui suit) sont effectuées sur le serveur demandeur. - -Cela signifie que les fonctions peuvent être effectuées sur différents serveurs. -Par exemple, dans la requête `SELECT f(sum(g(x))) FROM distributed_table GROUP BY h(y),` - -- si un `distributed_table` a au moins deux fragments, les fonctions ‘g’ et ‘h’ sont effectuées sur des serveurs distants, et la fonction ‘f’ est effectuée sur le serveur demandeur. -- si un `distributed_table` a un seul fragment, tous les ‘f’, ‘g’, et ‘h’ les fonctions sont exécutées sur le serveur de ce fragment. - -Le résultat d'une fonction habituellement ne dépendent pas le serveur sur lequel elle est exécutée. Cependant, parfois c'est important. -Par exemple, les fonctions qui fonctionnent avec des dictionnaires utilisent le dictionnaire qui existe sur le serveur sur lequel elles s'exécutent. -Un autre exemple est l' `hostName` fonction, qui renvoie le nom du serveur sur lequel il s'exécute afin de `GROUP BY` par les serveurs dans un `SELECT` requête. - -Si une fonction dans une requête est effectuée sur le demandeur serveur, mais vous devez l'exécuter sur des serveurs distants, vous pouvez l'envelopper dans un ‘any’ fonction d'agrégation ou l'ajouter à une clé dans `GROUP BY`. 
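For instance, assuming a `Distributed` table named `distributed_table`, grouping by `hostName()` shows on which remote server each group of rows was processed, because the function runs on the shards:

``` sql
-- hostName() is evaluated on each shard, not on the initiating server.
SELECT hostName() AS h, count() FROM distributed_table GROUP BY h;
```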
- -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/) diff --git a/docs/fr/sql-reference/functions/introspection.md b/docs/fr/sql-reference/functions/introspection.md deleted file mode 100644 index 91299217dc7..00000000000 --- a/docs/fr/sql-reference/functions/introspection.md +++ /dev/null @@ -1,310 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 65 -toc_title: Introspection ---- - -# Fonctions D'Introspection {#introspection-functions} - -Vous pouvez utiliser les fonctions décrites dans ce chapitre pour introspecter [ELF](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format) et [DWARF](https://en.wikipedia.org/wiki/DWARF) pour le profilage de requête. - -!!! warning "Avertissement" - Ces fonctions sont lentes et peuvent imposer des considérations de sécurité. - -Pour le bon fonctionnement des fonctions d'introspection: - -- Installer le `clickhouse-common-static-dbg` paquet. - -- Définir le [allow_introspection_functions](../../operations/settings/settings.md#settings-allow_introspection_functions) réglage sur 1. - - For security reasons introspection functions are disabled by default. - -Clickhouse enregistre les rapports du profileur [trace_log](../../operations/system-tables.md#system_tables-trace_log) système de table. Assurez-vous que la table et le profileur sont correctement configurés. - -## addressToLine {#addresstoline} - -Convertit l'adresse de mémoire virtuelle dans le processus de serveur ClickHouse en nom de fichier et en numéro de ligne dans le code source de ClickHouse. - -Si vous utilisez des paquets clickhouse officiels, vous devez installer le `clickhouse-common-static-dbg` paquet. - -**Syntaxe** - -``` sql -addressToLine(address_of_binary_instruction) -``` - -**Paramètre** - -- `address_of_binary_instruction` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Address of instruction in a running process. - -**Valeur renvoyée** - -- Nom de fichier du code Source et le numéro de ligne dans ce fichier délimité par deux-points. - - For example, `/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199`, where `199` is a line number. - -- Nom d'un binaire, si la fonction n'a pas pu trouver les informations de débogage. - -- Chaîne vide, si l'adresse n'est pas valide. - -Type: [Chaîne](../../sql-reference/data-types/string.md). - -**Exemple** - -Activation des fonctions d'introspection: - -``` sql -SET allow_introspection_functions=1 -``` - -Sélection de la première chaîne de `trace_log` système de table: - -``` sql -SELECT * FROM system.trace_log LIMIT 1 \G -``` - -``` text -Row 1: -────── -event_date: 2019-11-19 -event_time: 2019-11-19 18:57:23 -revision: 54429 -timer_type: Real -thread_number: 48 -query_id: 421b6855-1858-45a5-8f37-f383409d6d72 -trace: [140658411141617,94784174532828,94784076370703,94784076372094,94784076361020,94784175007680,140658411116251,140658403895439] -``` - -Le `trace` champ contient la trace de pile au moment de l'échantillonnage. 
- -Obtenir le nom de fichier du code source et le numéro de ligne pour une seule adresse: - -``` sql -SELECT addressToLine(94784076370703) \G -``` - -``` text -Row 1: -────── -addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199 -``` - -Application de la fonction à la trace de la pile entière: - -``` sql -SELECT - arrayStringConcat(arrayMap(x -> addressToLine(x), trace), '\n') AS trace_source_code_lines -FROM system.trace_log -LIMIT 1 -\G -``` - -Le [arrayMap](higher-order-functions.md#higher_order_functions-array-map) permet de traiter chaque élément individuel de l' `trace` tableau par la `addressToLine` fonction. Le résultat de ce traitement que vous voyez dans l' `trace_source_code_lines` colonne de sortie. - -``` text -Row 1: -────── -trace_source_code_lines: /lib/x86_64-linux-gnu/libpthread-2.27.so -/usr/lib/debug/usr/bin/clickhouse -/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199 -/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.h:155 -/usr/include/c++/9/bits/atomic_base.h:551 -/usr/lib/debug/usr/bin/clickhouse -/lib/x86_64-linux-gnu/libpthread-2.27.so -/build/glibc-OTsEL5/glibc-2.27/misc/../sysdeps/unix/sysv/linux/x86_64/clone.S:97 -``` - -## adressetosymbol {#addresstosymbol} - -Convertit l'adresse de mémoire virtuelle dans le processus de serveur ClickHouse en symbole à partir des fichiers d'objets ClickHouse. - -**Syntaxe** - -``` sql -addressToSymbol(address_of_binary_instruction) -``` - -**Paramètre** - -- `address_of_binary_instruction` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Address of instruction in a running process. - -**Valeur renvoyée** - -- Symbole des fichiers D'objets ClickHouse. -- Chaîne vide, si l'adresse n'est pas valide. - -Type: [Chaîne](../../sql-reference/data-types/string.md). - -**Exemple** - -Activation des fonctions d'introspection: - -``` sql -SET allow_introspection_functions=1 -``` - -Sélection de la première chaîne de `trace_log` système de table: - -``` sql -SELECT * FROM system.trace_log LIMIT 1 \G -``` - -``` text -Row 1: -────── -event_date: 2019-11-20 -event_time: 2019-11-20 16:57:59 -revision: 54429 -timer_type: Real -thread_number: 48 -query_id: 724028bf-f550-45aa-910d-2af6212b94ac -trace: [94138803686098,94138815010911,94138815096522,94138815101224,94138815102091,94138814222988,94138806823642,94138814457211,94138806823642,94138814457211,94138806823642,94138806795179,94138806796144,94138753770094,94138753771646,94138753760572,94138852407232,140399185266395,140399178045583] -``` - -Le `trace` champ contient la trace de pile au moment de l'échantillonnage. - -Obtenir un symbole pour une seule adresse: - -``` sql -SELECT addressToSymbol(94138803686098) \G -``` - -``` text -Row 1: -────── -addressToSymbol(94138803686098): _ZNK2DB24IAggregateFunctionHelperINS_20AggregateFunctionSumImmNS_24AggregateFunctionSumDataImEEEEE19addBatchSinglePlaceEmPcPPKNS_7IColumnEPNS_5ArenaE -``` - -Application de la fonction à la trace de la pile entière: - -``` sql -SELECT - arrayStringConcat(arrayMap(x -> addressToSymbol(x), trace), '\n') AS trace_symbols -FROM system.trace_log -LIMIT 1 -\G -``` - -Le [arrayMap](higher-order-functions.md#higher_order_functions-array-map) permet de traiter chaque élément individuel de l' `trace` tableau par la `addressToSymbols` fonction. Le résultat de ce traitement que vous voyez dans l' `trace_symbols` colonne de sortie. 
- -``` text -Row 1: -────── -trace_symbols: _ZNK2DB24IAggregateFunctionHelperINS_20AggregateFunctionSumImmNS_24AggregateFunctionSumDataImEEEEE19addBatchSinglePlaceEmPcPPKNS_7IColumnEPNS_5ArenaE -_ZNK2DB10Aggregator21executeWithoutKeyImplERPcmPNS0_28AggregateFunctionInstructionEPNS_5ArenaE -_ZN2DB10Aggregator14executeOnBlockESt6vectorIN3COWINS_7IColumnEE13immutable_ptrIS3_EESaIS6_EEmRNS_22AggregatedDataVariantsERS1_IPKS3_SaISC_EERS1_ISE_SaISE_EERb -_ZN2DB10Aggregator14executeOnBlockERKNS_5BlockERNS_22AggregatedDataVariantsERSt6vectorIPKNS_7IColumnESaIS9_EERS6_ISB_SaISB_EERb -_ZN2DB10Aggregator7executeERKSt10shared_ptrINS_17IBlockInputStreamEERNS_22AggregatedDataVariantsE -_ZN2DB27AggregatingBlockInputStream8readImplEv -_ZN2DB17IBlockInputStream4readEv -_ZN2DB26ExpressionBlockInputStream8readImplEv -_ZN2DB17IBlockInputStream4readEv -_ZN2DB26ExpressionBlockInputStream8readImplEv -_ZN2DB17IBlockInputStream4readEv -_ZN2DB28AsynchronousBlockInputStream9calculateEv -_ZNSt17_Function_handlerIFvvEZN2DB28AsynchronousBlockInputStream4nextEvEUlvE_E9_M_invokeERKSt9_Any_data -_ZN14ThreadPoolImplI20ThreadFromGlobalPoolE6workerESt14_List_iteratorIS0_E -_ZZN20ThreadFromGlobalPoolC4IZN14ThreadPoolImplIS_E12scheduleImplIvEET_St8functionIFvvEEiSt8optionalImEEUlvE1_JEEEOS4_DpOT0_ENKUlvE_clEv -_ZN14ThreadPoolImplISt6threadE6workerESt14_List_iteratorIS0_E -execute_native_thread_routine -start_thread -clone -``` - -## demangle {#demangle} - -Convertit un symbole que vous pouvez obtenir en utilisant le [adressetosymbol](#addresstosymbol) fonction au nom de la fonction c++. - -**Syntaxe** - -``` sql -demangle(symbol) -``` - -**Paramètre** - -- `symbol` ([Chaîne](../../sql-reference/data-types/string.md)) — Symbol from an object file. - -**Valeur renvoyée** - -- Nom de la fonction C++. -- Chaîne vide si un symbole n'est pas valide. - -Type: [Chaîne](../../sql-reference/data-types/string.md). - -**Exemple** - -Activation des fonctions d'introspection: - -``` sql -SET allow_introspection_functions=1 -``` - -Sélection de la première chaîne de `trace_log` système de table: - -``` sql -SELECT * FROM system.trace_log LIMIT 1 \G -``` - -``` text -Row 1: -────── -event_date: 2019-11-20 -event_time: 2019-11-20 16:57:59 -revision: 54429 -timer_type: Real -thread_number: 48 -query_id: 724028bf-f550-45aa-910d-2af6212b94ac -trace: [94138803686098,94138815010911,94138815096522,94138815101224,94138815102091,94138814222988,94138806823642,94138814457211,94138806823642,94138814457211,94138806823642,94138806795179,94138806796144,94138753770094,94138753771646,94138753760572,94138852407232,140399185266395,140399178045583] -``` - -Le `trace` champ contient la trace de pile au moment de l'échantillonnage. - -Obtenir un nom de fonction pour une seule adresse: - -``` sql -SELECT demangle(addressToSymbol(94138803686098)) \G -``` - -``` text -Row 1: -────── -demangle(addressToSymbol(94138803686098)): DB::IAggregateFunctionHelper > >::addBatchSinglePlace(unsigned long, char*, DB::IColumn const**, DB::Arena*) const -``` - -Application de la fonction à la trace de la pile entière: - -``` sql -SELECT - arrayStringConcat(arrayMap(x -> demangle(addressToSymbol(x)), trace), '\n') AS trace_functions -FROM system.trace_log -LIMIT 1 -\G -``` - -Le [arrayMap](higher-order-functions.md#higher_order_functions-array-map) permet de traiter chaque élément individuel de l' `trace` tableau par la `demangle` fonction. Le résultat de ce traitement que vous voyez dans l' `trace_functions` colonne de sortie. 
- -``` text -Row 1: -────── -trace_functions: DB::IAggregateFunctionHelper > >::addBatchSinglePlace(unsigned long, char*, DB::IColumn const**, DB::Arena*) const -DB::Aggregator::executeWithoutKeyImpl(char*&, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, DB::Arena*) const -DB::Aggregator::executeOnBlock(std::vector::immutable_ptr, std::allocator::immutable_ptr > >, unsigned long, DB::AggregatedDataVariants&, std::vector >&, std::vector >, std::allocator > > >&, bool&) -DB::Aggregator::executeOnBlock(DB::Block const&, DB::AggregatedDataVariants&, std::vector >&, std::vector >, std::allocator > > >&, bool&) -DB::Aggregator::execute(std::shared_ptr const&, DB::AggregatedDataVariants&) -DB::AggregatingBlockInputStream::readImpl() -DB::IBlockInputStream::read() -DB::ExpressionBlockInputStream::readImpl() -DB::IBlockInputStream::read() -DB::ExpressionBlockInputStream::readImpl() -DB::IBlockInputStream::read() -DB::AsynchronousBlockInputStream::calculate() -std::_Function_handler::_M_invoke(std::_Any_data const&) -ThreadPoolImpl::worker(std::_List_iterator) -ThreadFromGlobalPool::ThreadFromGlobalPool::scheduleImpl(std::function, int, std::optional)::{lambda()#3}>(ThreadPoolImpl::scheduleImpl(std::function, int, std::optional)::{lambda()#3}&&)::{lambda()#1}::operator()() const -ThreadPoolImpl::worker(std::_List_iterator) -execute_native_thread_routine -start_thread -clone -``` diff --git a/docs/fr/sql-reference/functions/ip-address-functions.md b/docs/fr/sql-reference/functions/ip-address-functions.md deleted file mode 100644 index 8beb40a534b..00000000000 --- a/docs/fr/sql-reference/functions/ip-address-functions.md +++ /dev/null @@ -1,248 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 55 -toc_title: Travailler avec des adresses IP ---- - -# Fonctions pour travailler avec des adresses IP {#functions-for-working-with-ip-addresses} - -## IPv4NumToString (num) {#ipv4numtostringnum} - -Prend un numéro UInt32. Interprète comme une adresse IPv4 dans big endian. Renvoie une chaîne contenant l'adresse IPv4 correspondante au format A. B. C. d (Nombres séparés par des points sous forme décimale). - -## IPv4StringToNum (s) {#ipv4stringtonums} - -La fonction inverse de IPv4NumToString. Si L'adresse IPv4 a un format non valide, elle renvoie 0. - -## IPv4NumToStringClassC(num) {#ipv4numtostringclasscnum} - -Similaire à IPv4NumToString, mais en utilisant xxx au lieu du dernier octet. - -Exemple: - -``` sql -SELECT - IPv4NumToStringClassC(ClientIP) AS k, - count() AS c -FROM test.hits -GROUP BY k -ORDER BY c DESC -LIMIT 10 -``` - -``` text -┌─k──────────────┬─────c─┐ -│ 83.149.9.xxx │ 26238 │ -│ 217.118.81.xxx │ 26074 │ -│ 213.87.129.xxx │ 25481 │ -│ 83.149.8.xxx │ 24984 │ -│ 217.118.83.xxx │ 22797 │ -│ 78.25.120.xxx │ 22354 │ -│ 213.87.131.xxx │ 21285 │ -│ 78.25.121.xxx │ 20887 │ -│ 188.162.65.xxx │ 19694 │ -│ 83.149.48.xxx │ 17406 │ -└────────────────┴───────┘ -``` - -Depuis l'utilisation de ‘xxx’ est très inhabituel, cela peut être changé à l'avenir. Nous vous recommandons de ne pas compter sur le format exact de ce fragment. - -### IPv6NumToString (x) {#ipv6numtostringx} - -Accepte une valeur FixedString (16) contenant L'adresse IPv6 au format binaire. Renvoie une chaîne contenant cette adresse au format texte. -Les adresses IPv4 mappées IPv6 sont sorties au format:: ffff: 111.222.33.44. 
Exemple: - -``` sql -SELECT IPv6NumToString(toFixedString(unhex('2A0206B8000000000000000000000011'), 16)) AS addr -``` - -``` text -┌─addr─────────┐ -│ 2a02:6b8::11 │ -└──────────────┘ -``` - -``` sql -SELECT - IPv6NumToString(ClientIP6 AS k), - count() AS c -FROM hits_all -WHERE EventDate = today() AND substring(ClientIP6, 1, 12) != unhex('00000000000000000000FFFF') -GROUP BY k -ORDER BY c DESC -LIMIT 10 -``` - -``` text -┌─IPv6NumToString(ClientIP6)──────────────┬─────c─┐ -│ 2a02:2168:aaa:bbbb::2 │ 24695 │ -│ 2a02:2698:abcd:abcd:abcd:abcd:8888:5555 │ 22408 │ -│ 2a02:6b8:0:fff::ff │ 16389 │ -│ 2a01:4f8:111:6666::2 │ 16016 │ -│ 2a02:2168:888:222::1 │ 15896 │ -│ 2a01:7e00::ffff:ffff:ffff:222 │ 14774 │ -│ 2a02:8109:eee:ee:eeee:eeee:eeee:eeee │ 14443 │ -│ 2a02:810b:8888:888:8888:8888:8888:8888 │ 14345 │ -│ 2a02:6b8:0:444:4444:4444:4444:4444 │ 14279 │ -│ 2a01:7e00::ffff:ffff:ffff:ffff │ 13880 │ -└─────────────────────────────────────────┴───────┘ -``` - -``` sql -SELECT - IPv6NumToString(ClientIP6 AS k), - count() AS c -FROM hits_all -WHERE EventDate = today() -GROUP BY k -ORDER BY c DESC -LIMIT 10 -``` - -``` text -┌─IPv6NumToString(ClientIP6)─┬──────c─┐ -│ ::ffff:94.26.111.111 │ 747440 │ -│ ::ffff:37.143.222.4 │ 529483 │ -│ ::ffff:5.166.111.99 │ 317707 │ -│ ::ffff:46.38.11.77 │ 263086 │ -│ ::ffff:79.105.111.111 │ 186611 │ -│ ::ffff:93.92.111.88 │ 176773 │ -│ ::ffff:84.53.111.33 │ 158709 │ -│ ::ffff:217.118.11.22 │ 154004 │ -│ ::ffff:217.118.11.33 │ 148449 │ -│ ::ffff:217.118.11.44 │ 148243 │ -└────────────────────────────┴────────┘ -``` - -## IPv6StringToNum (s) {#ipv6stringtonums} - -La fonction inverse de IPv6NumToString. Si L'adresse IPv6 a un format non valide, elle renvoie une chaîne d'octets null. -HEX peut être en majuscules ou en minuscules. - -## IPv4ToIPv6 (x) {#ipv4toipv6x} - -Prend un `UInt32` nombre. Interprète comme une adresse IPv4 dans [big endian](https://en.wikipedia.org/wiki/Endianness). Retourne un `FixedString(16)` valeur contenant l'adresse IPv6 au format binaire. Exemple: - -``` sql -SELECT IPv6NumToString(IPv4ToIPv6(IPv4StringToNum('192.168.0.1'))) AS addr -``` - -``` text -┌─addr───────────────┐ -│ ::ffff:192.168.0.1 │ -└────────────────────┘ -``` - -## cutIPv6 (x, bytesToCutForIPv6, bytesToCutForIPv4) {#cutipv6x-bytestocutforipv6-bytestocutforipv4} - -Accepte une valeur FixedString (16) contenant L'adresse IPv6 au format binaire. Renvoie une chaîne contenant l'adresse du nombre spécifié d'octets retiré au format texte. Exemple: - -``` sql -WITH - IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D') AS ipv6, - IPv4ToIPv6(IPv4StringToNum('192.168.0.1')) AS ipv4 -SELECT - cutIPv6(ipv6, 2, 0), - cutIPv6(ipv4, 0, 2) -``` - -``` text -┌─cutIPv6(ipv6, 2, 0)─────────────────┬─cutIPv6(ipv4, 0, 2)─┐ -│ 2001:db8:ac10:fe01:feed:babe:cafe:0 │ ::ffff:192.168.0.0 │ -└─────────────────────────────────────┴─────────────────────┘ -``` - -## Ipv4cirtorange (ipv4, Cidr), {#ipv4cidrtorangeipv4-cidr} - -Accepte un IPv4 et une valeur UInt8 contenant [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). Renvoie un tuple avec deux IPv4 contenant la plage inférieure et la plage supérieure du sous-réseau. - -``` sql -SELECT IPv4CIDRToRange(toIPv4('192.168.5.2'), 16) -``` - -``` text -┌─IPv4CIDRToRange(toIPv4('192.168.5.2'), 16)─┐ -│ ('192.168.0.0','192.168.255.255') │ -└────────────────────────────────────────────┘ -``` - -## Ipv6cirtorange (ipv6, Cidr), {#ipv6cidrtorangeipv6-cidr} - -Accepte un IPv6 et une valeur UInt8 contenant le CIDR. 
Renvoie un tuple avec deux IPv6 contenant la plage inférieure et la plage supérieure du sous-réseau. - -``` sql -SELECT IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32); -``` - -``` text -┌─IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32)─┐ -│ ('2001:db8::','2001:db8:ffff:ffff:ffff:ffff:ffff:ffff') │ -└────────────────────────────────────────────────────────────────────────┘ -``` - -## toipv4 (chaîne) {#toipv4string} - -Un alias `IPv4StringToNum()` cela prend une forme de chaîne D'adresse IPv4 et renvoie la valeur de [IPv4](../../sql-reference/data-types/domains/ipv4.md) type, qui est binaire égal à la valeur renvoyée par `IPv4StringToNum()`. - -``` sql -WITH - '171.225.130.45' as IPv4_string -SELECT - toTypeName(IPv4StringToNum(IPv4_string)), - toTypeName(toIPv4(IPv4_string)) -``` - -``` text -┌─toTypeName(IPv4StringToNum(IPv4_string))─┬─toTypeName(toIPv4(IPv4_string))─┐ -│ UInt32 │ IPv4 │ -└──────────────────────────────────────────┴─────────────────────────────────┘ -``` - -``` sql -WITH - '171.225.130.45' as IPv4_string -SELECT - hex(IPv4StringToNum(IPv4_string)), - hex(toIPv4(IPv4_string)) -``` - -``` text -┌─hex(IPv4StringToNum(IPv4_string))─┬─hex(toIPv4(IPv4_string))─┐ -│ ABE1822D │ ABE1822D │ -└───────────────────────────────────┴──────────────────────────┘ -``` - -## toipv6 (chaîne) {#toipv6string} - -Un alias `IPv6StringToNum()` cela prend une forme de chaîne D'adresse IPv6 et renvoie la valeur de [IPv6](../../sql-reference/data-types/domains/ipv6.md) type, qui est binaire égal à la valeur renvoyée par `IPv6StringToNum()`. - -``` sql -WITH - '2001:438:ffff::407d:1bc1' as IPv6_string -SELECT - toTypeName(IPv6StringToNum(IPv6_string)), - toTypeName(toIPv6(IPv6_string)) -``` - -``` text -┌─toTypeName(IPv6StringToNum(IPv6_string))─┬─toTypeName(toIPv6(IPv6_string))─┐ -│ FixedString(16) │ IPv6 │ -└──────────────────────────────────────────┴─────────────────────────────────┘ -``` - -``` sql -WITH - '2001:438:ffff::407d:1bc1' as IPv6_string -SELECT - hex(IPv6StringToNum(IPv6_string)), - hex(toIPv6(IPv6_string)) -``` - -``` text -┌─hex(IPv6StringToNum(IPv6_string))─┬─hex(toIPv6(IPv6_string))─────────┐ -│ 20010438FFFF000000000000407D1BC1 │ 20010438FFFF000000000000407D1BC1 │ -└───────────────────────────────────┴──────────────────────────────────┘ -``` - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/ip_address_functions/) diff --git a/docs/fr/sql-reference/functions/json-functions.md b/docs/fr/sql-reference/functions/json-functions.md deleted file mode 100644 index 5f92c99d0f5..00000000000 --- a/docs/fr/sql-reference/functions/json-functions.md +++ /dev/null @@ -1,297 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 56 -toc_title: Travailler avec JSON ---- - -# Fonctions pour travailler avec JSON {#functions-for-working-with-json} - -Dans Yandex.Metrica, JSON est transmis par les utilisateurs en tant que paramètres de session. Il y a quelques fonctions spéciales pour travailler avec ce JSON. (Bien que dans la plupart des cas, les JSONs soient en outre prétraités et les valeurs résultantes sont placées dans des colonnes séparées dans leur format traité.) Toutes ces fonctions sont basées sur des hypothèses fortes sur ce que le JSON peut être, mais elles essaient de faire le moins possible pour faire le travail. - -Les hypothèses suivantes sont apportées: - -1. Le nom du champ (argument de fonction) doit être une constante. -2. 
Le nom du champ est en quelque sorte codé canoniquement dans JSON. Exemple: `visitParamHas('{"abc":"def"}', 'abc') = 1`, mais `visitParamHas('{"\\u0061\\u0062\\u0063":"def"}', 'abc') = 0` -3. Les champs sont recherchés à n'importe quel niveau d'imbrication, sans discrimination. S'il y a plusieurs champs correspondants, la première occurrence est utilisé. -4. Le JSON n'a pas de caractères d'espace en dehors des littéraux de chaîne. - -## visitParamHas(params, nom) {#visitparamhasparams-name} - -Vérifie s'il existe un champ avec ‘name’ nom. - -## visitParamExtractUInt(params, nom) {#visitparamextractuintparams-name} - -Analyse UInt64 à partir de la valeur du champ nommé ‘name’. Si c'est un champ de type chaîne, il tente d'analyser un numéro à partir du début de la chaîne. Si le champ n'existe pas, ou s'il existe mais ne contient pas de nombre, il renvoie 0. - -## visitParamExtractInt(params, name) {#visitparamextractintparams-name} - -Le même que pour Int64. - -## visitParamExtractFloat(params, nom) {#visitparamextractfloatparams-name} - -Le même que pour Float64. - -## visitParamExtractBool(params, nom) {#visitparamextractboolparams-name} - -Analyse d'une valeur vrai/faux. Le résultat est UInt8. - -## visitParamExtractRaw(params, nom) {#visitparamextractrawparams-name} - -Retourne la valeur d'un champ, y compris les séparateurs. - -Exemple: - -``` sql -visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"' -visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}' -``` - -## visitParamExtractString(params, nom) {#visitparamextractstringparams-name} - -Analyse la chaîne entre guillemets doubles. La valeur est sans échappement. Si l'échappement échoue, il renvoie une chaîne vide. - -Exemple: - -``` sql -visitParamExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0' -visitParamExtractString('{"abc":"\\u263a"}', 'abc') = '☺' -visitParamExtractString('{"abc":"\\u263"}', 'abc') = '' -visitParamExtractString('{"abc":"hello}', 'abc') = '' -``` - -Il n'y a actuellement aucun support pour les points de code dans le format `\uXXXX\uYYYY` qui ne proviennent pas du plan multilingue de base (ils sont convertis en CESU-8 au lieu de UTF-8). - -Les fonctions suivantes sont basées sur [simdjson](https://github.com/lemire/simdjson) conçu pour des exigences D'analyse JSON plus complexes. L'hypothèse 2 mentionnée ci-dessus s'applique toujours. - -## isValidJSON (json) {#isvalidjsonjson} - -Vérifie que la chaîne est un json valide. - -Exemple: - -``` sql -SELECT isValidJSON('{"a": "hello", "b": [-100, 200.0, 300]}') = 1 -SELECT isValidJSON('not a json') = 0 -``` - -## JSONHas(json\[, indices_or_keys\]…) {#jsonhasjson-indices-or-keys} - -Si la valeur existe dans le document JSON, `1` sera retourné. - -Si la valeur n'existe pas, `0` sera retourné. - -Exemple: - -``` sql -SELECT JSONHas('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 1 -SELECT JSONHas('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 4) = 0 -``` - -`indices_or_keys` est une liste de zéro ou plusieurs arguments chacun d'entre eux peut être une chaîne ou un entier. - -- String = membre d'objet d'accès par clé. -- Entier positif = accédez au n-ème membre / clé depuis le début. -- Entier négatif = accédez au n-ème membre / clé à partir de la fin. - -Minimum de l'indice de l'élément est 1. Ainsi, l'élément 0 n'existe pas. - -Vous pouvez utiliser des entiers pour accéder à la fois aux tableaux JSON et aux objets JSON. 
- -Ainsi, par exemple: - -``` sql -SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', 1) = 'a' -SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', 2) = 'b' -SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', -1) = 'b' -SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', -2) = 'a' -SELECT JSONExtractString('{"a": "hello", "b": [-100, 200.0, 300]}', 1) = 'hello' -``` - -## JSONLength(json\[, indices_or_keys\]…) {#jsonlengthjson-indices-or-keys} - -Renvoie la longueur D'un tableau JSON ou d'un objet JSON. - -Si la valeur n'existe pas ou a un mauvais type, `0` sera retourné. - -Exemple: - -``` sql -SELECT JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 3 -SELECT JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}') = 2 -``` - -## JSONType(json\[, indices_or_keys\]…) {#jsontypejson-indices-or-keys} - -De retour le type d'une valeur JSON. - -Si la valeur n'existe pas, `Null` sera retourné. - -Exemple: - -``` sql -SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}') = 'Object' -SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'a') = 'String' -SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 'Array' -``` - -## JSONExtractUInt(json\[, indices_or_keys\]…) {#jsonextractuintjson-indices-or-keys} - -## JSONExtractInt(json\[, indices_or_keys\]…) {#jsonextractintjson-indices-or-keys} - -## JSONExtractFloat(json\[, indices_or_keys\]…) {#jsonextractfloatjson-indices-or-keys} - -## JSONExtractBool(json\[, indices_or_keys\]…) {#jsonextractbooljson-indices-or-keys} - -Analyse un JSON et extrait une valeur. Ces fonctions sont similaires à `visitParam` fonction. - -Si la valeur n'existe pas ou a un mauvais type, `0` sera retourné. - -Exemple: - -``` sql -SELECT JSONExtractInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 1) = -100 -SELECT JSONExtractFloat('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 2) = 200.0 -SELECT JSONExtractUInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', -1) = 300 -``` - -## JSONExtractString(json\[, indices_or_keys\]…) {#jsonextractstringjson-indices-or-keys} - -Analyse un JSON et extrait une chaîne. Cette fonction est similaire à `visitParamExtractString` fonction. - -Si la valeur n'existe pas ou a un mauvais type, une chaîne vide est retournée. - -La valeur est sans échappement. Si l'échappement échoue, il renvoie une chaîne vide. - -Exemple: - -``` sql -SELECT JSONExtractString('{"a": "hello", "b": [-100, 200.0, 300]}', 'a') = 'hello' -SELECT JSONExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0' -SELECT JSONExtractString('{"abc":"\\u263a"}', 'abc') = '☺' -SELECT JSONExtractString('{"abc":"\\u263"}', 'abc') = '' -SELECT JSONExtractString('{"abc":"hello}', 'abc') = '' -``` - -## JSONExtract(json\[, indices_or_keys…\], Return_type) {#jsonextractjson-indices-or-keys-return-type} - -Analyse un JSON et extrait une valeur du type de données clickhouse donné. - -C'est une généralisation de la précédente `JSONExtract` fonction. -Cela signifie -`JSONExtract(..., 'String')` retourne exactement le même que `JSONExtractString()`, -`JSONExtract(..., 'Float64')` retourne exactement le même que `JSONExtractFloat()`. 
- -Exemple: - -``` sql -SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(String, Array(Float64))') = ('hello',[-100,200,300]) -SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(b Array(Float64), a String)') = ([-100,200,300],'hello') -SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(Nullable(Int8))') = [-100, NULL, NULL] -SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 4, 'Nullable(Int64)') = NULL -SELECT JSONExtract('{"passed": true}', 'passed', 'UInt8') = 1 -SELECT JSONExtract('{"day": "Thursday"}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \'Tuesday\' = 2, \'Wednesday\' = 3, \'Thursday\' = 4, \'Friday\' = 5, \'Saturday\' = 6)') = 'Thursday' -SELECT JSONExtract('{"day": 5}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \'Tuesday\' = 2, \'Wednesday\' = 3, \'Thursday\' = 4, \'Friday\' = 5, \'Saturday\' = 6)') = 'Friday' -``` - -## JSONExtractKeysAndValues(json\[, indices_or_keys…\], Value_type) {#jsonextractkeysandvaluesjson-indices-or-keys-value-type} - -Analyse les paires clé-valeur à partir D'un JSON où les valeurs sont du type de données clickhouse donné. - -Exemple: - -``` sql -SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8') = [('a',5),('b',7),('c',11)] -``` - -## JSONExtractRaw(json\[, indices_or_keys\]…) {#jsonextractrawjson-indices-or-keys} - -Renvoie une partie de JSON en tant que chaîne non analysée. - -Si la pièce n'existe pas ou a un mauvais type, une chaîne vide est retournée. - -Exemple: - -``` sql -SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = '[-100, 200.0, 300]' -``` - -## JSONExtractArrayRaw(json\[, indices_or_keys…\]) {#jsonextractarrayrawjson-indices-or-keys} - -Retourne un tableau avec des éléments de tableau JSON, chacun représenté comme une chaîne non analysée. - -Si la pièce n'existe pas ou n'est pas de tableau, un tableau vide sera retournée. - -Exemple: - -``` sql -SELECT JSONExtractArrayRaw('{"a": "hello", "b": [-100, 200.0, "hello"]}', 'b') = ['-100', '200.0', '"hello"']' -``` - -## JSONExtractKeysAndValuesRaw {#json-extract-keys-and-values-raw} - -Extrait les données brutes d'un objet JSON. - -**Syntaxe** - -``` sql -JSONExtractKeysAndValuesRaw(json[, p, a, t, h]) -``` - -**Paramètre** - -- `json` — [Chaîne](../data-types/string.md) avec JSON valide. -- `p, a, t, h` — Comma-separated indices or keys that specify the path to the inner field in a nested JSON object. Each argument can be either a [chaîne](../data-types/string.md) pour obtenir le champ par la touche ou un [entier](../data-types/int-uint.md) pour obtenir le N-ème champ (indexé à partir de 1, les entiers négatifs comptent à partir de la fin). S'il n'est pas défini, le JSON entier est analysé en tant qu'objet de niveau supérieur. Paramètre facultatif. - -**Valeurs renvoyées** - -- Tableau avec `('key', 'value')` tuple. Les deux membres du tuple sont des chaînes. -- Tableau vide si l'objet demandé n'existe pas, ou entrée JSON n'est pas valide. - -Type: [Tableau](../data-types/array.md)([Tuple](../data-types/tuple.md)([Chaîne](../data-types/string.md), [Chaîne](../data-types/string.md)). 
- -**Exemple** - -Requête: - -``` sql -SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello", "f": "world"}}}') -``` - -Résultat: - -``` text -┌─JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello", "f": "world"}}}')─┐ -│ [('a','[-100,200]'),('b','{"c":{"d":"hello","f":"world"}}')] │ -└──────────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -Requête: - -``` sql -SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello", "f": "world"}}}', 'b') -``` - -Résultat: - -``` text -┌─JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello", "f": "world"}}}', 'b')─┐ -│ [('c','{"d":"hello","f":"world"}')] │ -└───────────────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -Requête: - -``` sql -SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello", "f": "world"}}}', -1, 'c') -``` - -Résultat: - -``` text -┌─JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello", "f": "world"}}}', -1, 'c')─┐ -│ [('d','"hello"'),('f','"world"')] │ -└───────────────────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/json_functions/) diff --git a/docs/fr/sql-reference/functions/logical-functions.md b/docs/fr/sql-reference/functions/logical-functions.md deleted file mode 100644 index d01d9e02088..00000000000 --- a/docs/fr/sql-reference/functions/logical-functions.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 37 -toc_title: Logique ---- - -# Les Fonctions Logiques {#logical-functions} - -Les fonctions logiques acceptent tous les types numériques, mais renvoient un nombre UInt8 égal à 0 ou 1. - -Zéro comme argument est considéré “false,” alors que toute valeur non nulle est considérée comme “true”. - -## et, et opérateur {#and-and-operator} - -## ou, ou opérateur {#or-or-operator} - -## pas, pas opérateur {#not-not-operator} - -## xor {#xor} - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/logical_functions/) diff --git a/docs/fr/sql-reference/functions/machine-learning-functions.md b/docs/fr/sql-reference/functions/machine-learning-functions.md deleted file mode 100644 index 2212e0caa5a..00000000000 --- a/docs/fr/sql-reference/functions/machine-learning-functions.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 64 -toc_title: Fonctions D'Apprentissage Automatique ---- - -# Fonctions D'Apprentissage Automatique {#machine-learning-functions} - -## evalMLMethod (prédiction) {#machine_learning_methods-evalmlmethod} - -Prédiction utilisant des modèles de régression ajustés utilise `evalMLMethod` fonction. Voir le lien dans la `linearRegression`. - -### Régression Linéaire Stochastique {#stochastic-linear-regression} - -Le [stochasticLinearRegression](../../sql-reference/aggregate-functions/reference.md#agg_functions-stochasticlinearregression) la fonction d'agrégat implémente une méthode de descente de gradient stochastique utilisant un modèle linéaire et une fonction de perte MSE. Utiliser `evalMLMethod` prédire sur de nouvelles données. 
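A hedged end-to-end sketch of this pattern (the `train_data` and `test_data` tables and their columns are hypothetical; the four parameters are the learning rate, the L2 regularization coefficient, the mini-batch size, and the update method):

``` sql
-- Fit the model and keep its aggregate state in a table.
CREATE TABLE your_model ENGINE = Memory AS
SELECT stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2) AS state
FROM train_data;

-- Predict on new rows with evalMLMethod, passing the same feature columns.
WITH (SELECT state FROM your_model) AS model
SELECT evalMLMethod(model, param1, param2) AS prediction
FROM test_data;
```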
- -### Régression Logistique Stochastique {#stochastic-logistic-regression} - -Le [stochasticLogisticRegression](../../sql-reference/aggregate-functions/reference.md#agg_functions-stochasticlogisticregression) la fonction d'agrégation implémente la méthode de descente de gradient stochastique pour le problème de classification binaire. Utiliser `evalMLMethod` prédire sur de nouvelles données. diff --git a/docs/fr/sql-reference/functions/math-functions.md b/docs/fr/sql-reference/functions/math-functions.md deleted file mode 100644 index f5dff150caa..00000000000 --- a/docs/fr/sql-reference/functions/math-functions.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 44 -toc_title: "Math\xE9matique" ---- - -# Fonctions Mathématiques {#mathematical-functions} - -Toutes les fonctions renvoient un nombre Float64. La précision du résultat est proche de la précision maximale possible, mais le résultat peut ne pas coïncider avec le nombre représentable de la machine le plus proche du nombre réel correspondant. - -## e() {#e} - -Renvoie un nombre Float64 proche du nombre E. - -## pi() {#pi} - -Returns a Float64 number that is close to the number π. - -## exp (x) {#expx} - -Accepte un argument numérique et renvoie un Float64 nombre proche de l'exposant de l'argument. - -## log(x), ln (x) {#logx-lnx} - -Accepte un argument numérique et renvoie un nombre Float64 proche du logarithme naturel de l'argument. - -## exp2 (x) {#exp2x} - -Accepte un argument numérique et renvoie un nombre Float64 proche de 2 à la puissance de X. - -## log2 (x) {#log2x} - -Accepte un argument numérique et renvoie un Float64 nombre proximité du logarithme binaire de l'argument. - -## exp10 (x) {#exp10x} - -Accepte un argument numérique et renvoie un nombre Float64 proche de 10 à la puissance de X. - -## log10 (x) {#log10x} - -Accepte un argument numérique et renvoie un nombre Float64 proche du logarithme décimal de l'argument. - -## sqrt (x) {#sqrtx} - -Accepte un argument numérique et renvoie un Float64 nombre proche de la racine carrée de l'argument. - -## cbrt (x) {#cbrtx} - -Accepte un argument numérique et renvoie un Float64 nombre proche de la racine cubique de l'argument. - -## erf (x) {#erfx} - -Si ‘x’ est non négatif, alors `erf(x / σ√2)` est la probabilité qu'une variable aléatoire ayant une distribution normale avec un écart type ‘σ’ prend la valeur qui est séparée de la valeur attendue par plus de ‘x’. - -Exemple (règle de trois sigma): - -``` sql -SELECT erf(3 / sqrt(2)) -``` - -``` text -┌─erf(divide(3, sqrt(2)))─┐ -│ 0.9973002039367398 │ -└─────────────────────────┘ -``` - -## erfc (x) {#erfcx} - -Accepte un argument numérique et renvoie un nombre Float64 proche de 1-erf (x), mais sans perte de précision pour ‘x’ valeur. - -## lgamma (x) {#lgammax} - -Le logarithme de la fonction gamma. - -## tgamma (x) {#tgammax} - -La fonction Gamma. - -## sin (x) {#sinx} - -Sine. - -## cos (x) {#cosx} - -Cosinus. - -## tan (x) {#tanx} - -Tangente. - -## asin (x) {#asinx} - -Le sinus d'arc. - -## acos (x) {#acosx} - -Le cosinus de l'arc. - -## atan (x) {#atanx} - -L'arc tangente. - -## pow(x, y), la puissance(x, y) {#powx-y-powerx-y} - -Prend deux arguments numériques x et Y. renvoie un nombre Float64 proche de x à la puissance de Y. - -## intExp2 {#intexp2} - -Accepte un argument numérique et renvoie un nombre UInt64 proche de 2 à la puissance de X. 
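- 
-Par exemple, contrairement à `exp2`, le résultat est un entier `UInt64`: 
- 
-``` sql 
-SELECT intExp2(10) AS x, toTypeName(x) AS type 
-``` 
- 
-``` text 
-┌────x─┬─type───┐ 
-│ 1024 │ UInt64 │ 
-└──────┴────────┘ 
-``` 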
- -## intExp10 {#intexp10} - -Accepte un argument numérique et renvoie un nombre UInt64 proche de 10 à la puissance de X. - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/math_functions/) diff --git a/docs/fr/sql-reference/functions/other-functions.md b/docs/fr/sql-reference/functions/other-functions.md deleted file mode 100644 index e5c6abedd75..00000000000 --- a/docs/fr/sql-reference/functions/other-functions.md +++ /dev/null @@ -1,1205 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 66 -toc_title: Autre ---- - -# D'Autres Fonctions {#other-functions} - -## hôte() {#hostname} - -Renvoie une chaîne avec le nom de l'hôte sur lequel cette fonction a été exécutée. Pour le traitement distribué, c'est le nom du serveur distant, si la fonction est exécutée sur un serveur distant. - -## getMacro {#getmacro} - -Obtient une valeur nommée à partir [macro](../../operations/server-configuration-parameters/settings.md#macros) la section de la configuration du serveur. - -**Syntaxe** - -``` sql -getMacro(name); -``` - -**Paramètre** - -- `name` — Name to retrieve from the `macros` section. [Chaîne](../../sql-reference/data-types/string.md#string). - -**Valeur renvoyée** - -- Valeur de la macro spécifiée. - -Type: [Chaîne](../../sql-reference/data-types/string.md). - -**Exemple** - -Exemple `macros` section dans le fichier de configuration du serveur: - -``` xml - - Value - -``` - -Requête: - -``` sql -SELECT getMacro('test'); -``` - -Résultat: - -``` text -┌─getMacro('test')─┐ -│ Value │ -└──────────────────┘ -``` - -Une méthode alternative pour obtenir la même valeur: - -``` sql -SELECT * FROM system.macros -WHERE macro = 'test'; -``` - -``` text -┌─macro─┬─substitution─┐ -│ test │ Value │ -└───────┴──────────────┘ -``` - -## FQDN {#fqdn} - -Retourne le nom de domaine pleinement qualifié. - -**Syntaxe** - -``` sql -fqdn(); -``` - -Cette fonction est insensible à la casse. - -**Valeur renvoyée** - -- Chaîne avec le nom de domaine complet. - -Type: `String`. - -**Exemple** - -Requête: - -``` sql -SELECT FQDN(); -``` - -Résultat: - -``` text -┌─FQDN()──────────────────────────┐ -│ clickhouse.ru-central1.internal │ -└─────────────────────────────────┘ -``` - -## basename {#basename} - -Extrait la partie finale d'une chaîne après la dernière barre oblique ou barre oblique inverse. Cette fonction est souvent utilisée pour extraire le nom de fichier d'un chemin. - -``` sql -basename( expr ) -``` - -**Paramètre** - -- `expr` — Expression resulting in a [Chaîne](../../sql-reference/data-types/string.md) type de valeur. Tous les antislashs doivent être échappés dans la valeur résultante. - -**Valeur Renvoyée** - -Une chaîne de caractères qui contient: - -- La partie finale d'une chaîne après la dernière barre oblique ou barre oblique inverse. - - If the input string contains a path ending with slash or backslash, for example, `/` or `c:\`, the function returns an empty string. - -- La chaîne d'origine s'il n'y a pas de barres obliques ou de barres obliques inverses. 
- -**Exemple** - -``` sql -SELECT 'some/long/path/to/file' AS a, basename(a) -``` - -``` text -┌─a──────────────────────┬─basename('some\\long\\path\\to\\file')─┐ -│ some\long\path\to\file │ file │ -└────────────────────────┴────────────────────────────────────────┘ -``` - -``` sql -SELECT 'some\\long\\path\\to\\file' AS a, basename(a) -``` - -``` text -┌─a──────────────────────┬─basename('some\\long\\path\\to\\file')─┐ -│ some\long\path\to\file │ file │ -└────────────────────────┴────────────────────────────────────────┘ -``` - -``` sql -SELECT 'some-file-name' AS a, basename(a) -``` - -``` text -┌─a──────────────┬─basename('some-file-name')─┐ -│ some-file-name │ some-file-name │ -└────────────────┴────────────────────────────┘ -``` - -## visibleWidth (x) {#visiblewidthx} - -Calcule la largeur approximative lors de la sortie des valeurs vers la console au format texte (séparé par des tabulations). -Cette fonction est utilisée par le système pour implémenter de jolis formats. - -`NULL` est représenté comme une chaîne correspondant à `NULL` dans `Pretty` format. - -``` sql -SELECT visibleWidth(NULL) -``` - -``` text -┌─visibleWidth(NULL)─┐ -│ 4 │ -└────────────────────┘ -``` - -## toTypeName (x) {#totypenamex} - -Renvoie une chaîne contenant le nom du type de l'argument passé. - -Si `NULL` est passé à la fonction en entrée, puis il renvoie le `Nullable(Nothing)` type, ce qui correspond à un interne `NULL` représentation à ClickHouse. - -## la taille de bloc() {#function-blocksize} - -Récupère la taille du bloc. -Dans ClickHouse, les requêtes sont toujours exécutées sur des blocs (ensembles de parties de colonne). Cette fonction permet d'obtenir la taille du bloc pour lequel vous l'avez appelé. - -## matérialiser (x) {#materializex} - -Transforme une constante dans une colonne contenant une seule valeur. -Dans ClickHouse, les colonnes complètes et les constantes sont représentées différemment en mémoire. Les fonctions fonctionnent différemment pour les arguments constants et les arguments normaux (un code différent est exécuté), bien que le résultat soit presque toujours le même. Cette fonction sert à déboguer ce comportement. - -## ignore(…) {#ignore} - -Accepte tous les arguments, y compris `NULL`. Renvoie toujours 0. -Cependant, l'argument est toujours évalué. Cela peut être utilisé pour les benchmarks. - -## sommeil(secondes) {#sleepseconds} - -Dormir ‘seconds’ secondes sur chaque bloc de données. Vous pouvez spécifier un nombre entier ou un nombre à virgule flottante. - -## sleepEachRow (secondes) {#sleepeachrowseconds} - -Dormir ‘seconds’ secondes sur chaque ligne. Vous pouvez spécifier un nombre entier ou un nombre à virgule flottante. - -## currentDatabase() {#currentdatabase} - -Retourne le nom de la base de données actuelle. -Vous pouvez utiliser cette fonction dans les paramètres du moteur de table dans une requête CREATE TABLE où vous devez spécifier la base de données. - -## currentUser() {#other-function-currentuser} - -Renvoie la connexion de l'utilisateur actuel. La connexion de l'utilisateur, cette requête initiée, sera renvoyée en cas de requête distibuted. - -``` sql -SELECT currentUser(); -``` - -Alias: `user()`, `USER()`. - -**Valeurs renvoyées** - -- Connexion de l'utilisateur actuel. -- Connexion de l'utilisateur qui a lancé la requête en cas de requête distribuée. - -Type: `String`. 
- -**Exemple** - -Requête: - -``` sql -SELECT currentUser(); -``` - -Résultat: - -``` text -┌─currentUser()─┐ -│ default │ -└───────────────┘ -``` - -## isConstant {#is-constant} - -Vérifie si l'argument est une expression constante. - -A constant expression means an expression whose resulting value is known at the query analysis (i.e. before execution). For example, expressions over [littéral](../syntax.md#literals) sont des expressions constantes. - -La fonction est destinée au développement, au débogage et à la démonstration. - -**Syntaxe** - -``` sql -isConstant(x) -``` - -**Paramètre** - -- `x` — Expression to check. - -**Valeurs renvoyées** - -- `1` — `x` est constante. -- `0` — `x` est non constante. - -Type: [UInt8](../data-types/int-uint.md). - -**Exemple** - -Requête: - -``` sql -SELECT isConstant(x + 1) FROM (SELECT 43 AS x) -``` - -Résultat: - -``` text -┌─isConstant(plus(x, 1))─┐ -│ 1 │ -└────────────────────────┘ -``` - -Requête: - -``` sql -WITH 3.14 AS pi SELECT isConstant(cos(pi)) -``` - -Résultat: - -``` text -┌─isConstant(cos(pi))─┐ -│ 1 │ -└─────────────────────┘ -``` - -Requête: - -``` sql -SELECT isConstant(number) FROM numbers(1) -``` - -Résultat: - -``` text -┌─isConstant(number)─┐ -│ 0 │ -└────────────────────┘ -``` - -## isFinite (x) {#isfinitex} - -Accepte Float32 et Float64 et renvoie UInt8 égal à 1 si l'argument n'est pas infini et pas un NaN, sinon 0. - -## isInfinite (x) {#isinfinitex} - -Accepte Float32 et Float64 et renvoie UInt8 égal à 1 si l'argument est infini, sinon 0. Notez que 0 est retourné pour un NaN. - -## ifNotFinite {#ifnotfinite} - -Vérifie si la valeur à virgule flottante est finie. - -**Syntaxe** - - ifNotFinite(x,y) - -**Paramètre** - -- `x` — Value to be checked for infinity. Type: [Flottant\*](../../sql-reference/data-types/float.md). -- `y` — Fallback value. Type: [Flottant\*](../../sql-reference/data-types/float.md). - -**Valeur renvoyée** - -- `x` si `x` est finie. -- `y` si `x` n'est pas finie. - -**Exemple** - -Requête: - - SELECT 1/0 as infimum, ifNotFinite(infimum,42) - -Résultat: - - ┌─infimum─┬─ifNotFinite(divide(1, 0), 42)─┐ - │ inf │ 42 │ - └─────────┴───────────────────────────────┘ - -Vous pouvez obtenir un résultat similaire en utilisant [opérateur ternaire](conditional-functions.md#ternary-operator): `isFinite(x) ? x : y`. - -## isNaN (x) {#isnanx} - -Accepte Float32 et Float64 et renvoie UInt8 égal à 1 si l'argument est un NaN, sinon 0. - -## hasColumnInTable(\[‘hostname’\[, ‘username’\[, ‘password’\]\],\] ‘database’, ‘table’, ‘column’) {#hascolumnintablehostname-username-password-database-table-column} - -Accepte les chaînes constantes: nom de la base de données, nom de la table et nom de la colonne. Renvoie une expression constante UInt8 égale à 1 s'il y a une colonne, sinon 0. Si le paramètre hostname est défini, le test s'exécutera sur un serveur distant. -La fonction renvoie une exception si la table n'existe pas. -Pour les éléments imbriqués structure des données, la fonction vérifie l'existence d'une colonne. Pour la structure de données imbriquée elle-même, la fonction renvoie 0. - -## bar {#function-bar} - -Permet de construire un diagramme unicode-art. - -`bar(x, min, max, width)` dessine une bande avec une largeur proportionnelle à `(x - min)` et égale à `width` les caractères lors de la `x = max`. - -Paramètre: - -- `x` — Size to display. -- `min, max` — Integer constants. The value must fit in `Int64`. -- `width` — Constant, positive integer, can be fractional. 
- -La bande dessinée avec précision à un huitième d'un symbole. - -Exemple: - -``` sql -SELECT - toHour(EventTime) AS h, - count() AS c, - bar(c, 0, 600000, 20) AS bar -FROM test.hits -GROUP BY h -ORDER BY h ASC -``` - -``` text -┌──h─┬──────c─┬─bar────────────────┐ -│ 0 │ 292907 │ █████████▋ │ -│ 1 │ 180563 │ ██████ │ -│ 2 │ 114861 │ ███▋ │ -│ 3 │ 85069 │ ██▋ │ -│ 4 │ 68543 │ ██▎ │ -│ 5 │ 78116 │ ██▌ │ -│ 6 │ 113474 │ ███▋ │ -│ 7 │ 170678 │ █████▋ │ -│ 8 │ 278380 │ █████████▎ │ -│ 9 │ 391053 │ █████████████ │ -│ 10 │ 457681 │ ███████████████▎ │ -│ 11 │ 493667 │ ████████████████▍ │ -│ 12 │ 509641 │ ████████████████▊ │ -│ 13 │ 522947 │ █████████████████▍ │ -│ 14 │ 539954 │ █████████████████▊ │ -│ 15 │ 528460 │ █████████████████▌ │ -│ 16 │ 539201 │ █████████████████▊ │ -│ 17 │ 523539 │ █████████████████▍ │ -│ 18 │ 506467 │ ████████████████▊ │ -│ 19 │ 520915 │ █████████████████▎ │ -│ 20 │ 521665 │ █████████████████▍ │ -│ 21 │ 542078 │ ██████████████████ │ -│ 22 │ 493642 │ ████████████████▍ │ -│ 23 │ 400397 │ █████████████▎ │ -└────┴────────┴────────────────────┘ -``` - -## transformer {#transform} - -Transforme une valeur en fonction explicitement définis cartographie de certains éléments à l'autre. -Il existe deux variantes de cette fonction: - -### de transformation(x, array_from, array_to, par défaut) {#transformx-array-from-array-to-default} - -`x` – What to transform. - -`array_from` – Constant array of values for converting. - -`array_to` – Constant array of values to convert the values in ‘from’ de. - -`default` – Which value to use if ‘x’ n'est pas égale à une des valeurs de ‘from’. - -`array_from` et `array_to` – Arrays of the same size. - -Type: - -`transform(T, Array(T), Array(U), U) -> U` - -`T` et `U` peuvent être des types numériques, chaîne ou Date ou DateTime. -Lorsque la même lettre est indiquée (T ou U), pour les types numériques, il se peut qu'il ne s'agisse pas de types correspondants, mais de types ayant un type commun. -Par exemple, le premier argument peut avoir le type Int64, tandis que le second a le type Array(UInt16). - -Si l' ‘x’ la valeur est égale à l'un des éléments dans la ‘array_from’ tableau, elle renvoie l'élément existant (qui est numéroté de même) de la ‘array_to’ tableau. Sinon, elle renvoie ‘default’. S'il y a plusieurs éléments correspondants dans ‘array_from’ il renvoie l'un des matches. - -Exemple: - -``` sql -SELECT - transform(SearchEngineID, [2, 3], ['Yandex', 'Google'], 'Other') AS title, - count() AS c -FROM test.hits -WHERE SearchEngineID != 0 -GROUP BY title -ORDER BY c DESC -``` - -``` text -┌─title─────┬──────c─┐ -│ Yandex │ 498635 │ -│ Google │ 229872 │ -│ Other │ 104472 │ -└───────────┴────────┘ -``` - -### de transformation(x, array_from, array_to) {#transformx-array-from-array-to} - -Diffère de la première variation en ce que le ‘default’ l'argument est omis. -Si l' ‘x’ la valeur est égale à l'un des éléments dans la ‘array_from’ tableau, elle renvoie l'élément correspondant (qui est numéroté de même) de la ‘array_to’ tableau. Sinon, elle renvoie ‘x’. 
- -Type: - -`transform(T, Array(T), Array(T)) -> T` - -Exemple: - -``` sql -SELECT - transform(domain(Referer), ['yandex.ru', 'google.ru', 'vk.com'], ['www.yandex', 'example.com']) AS s, - count() AS c -FROM test.hits -GROUP BY domain(Referer) -ORDER BY count() DESC -LIMIT 10 -``` - -``` text -┌─s──────────────┬───────c─┐ -│ │ 2906259 │ -│ www.yandex │ 867767 │ -│ ███████.ru │ 313599 │ -│ mail.yandex.ru │ 107147 │ -│ ██████.ru │ 100355 │ -│ █████████.ru │ 65040 │ -│ news.yandex.ru │ 64515 │ -│ ██████.net │ 59141 │ -│ example.com │ 57316 │ -└────────────────┴─────────┘ -``` - -## formatReadableSize (x) {#formatreadablesizex} - -Accepte la taille (nombre d'octets). Renvoie une taille arrondie avec un suffixe (KiB, MiB, etc.) comme une chaîne de caractères. - -Exemple: - -``` sql -SELECT - arrayJoin([1, 1024, 1024*1024, 192851925]) AS filesize_bytes, - formatReadableSize(filesize_bytes) AS filesize -``` - -``` text -┌─filesize_bytes─┬─filesize───┐ -│ 1 │ 1.00 B │ -│ 1024 │ 1.00 KiB │ -│ 1048576 │ 1.00 MiB │ -│ 192851925 │ 183.92 MiB │ -└────────────────┴────────────┘ -``` - -## moins (a, b) {#leasta-b} - -Renvoie la plus petite valeur de a et b. - -## la plus grande(a, b) {#greatesta-b} - -Renvoie la plus grande valeur de a et B. - -## le temps de disponibilité() {#uptime} - -Renvoie la disponibilité du serveur en quelques secondes. - -## version() {#version} - -Renvoie la version du serveur sous forme de chaîne. - -## fuseau() {#timezone} - -Retourne le fuseau horaire du serveur. - -## blockNumber {#blocknumber} - -Renvoie le numéro de séquence du bloc de données où se trouve la ligne. - -## rowNumberInBlock {#function-rownumberinblock} - -Renvoie le numéro de séquence de la ligne dans le bloc de données. Différents blocs de données sont toujours recalculés. - -## rowNumberInAllBlocks() {#rownumberinallblocks} - -Renvoie le numéro de séquence de la ligne dans le bloc de données. Cette fonction ne prend en compte que les blocs de données affectés. - -## voisin {#neighbor} - -La fonction de fenêtre qui donne accès à une ligne à un décalage spécifié qui vient avant ou après la ligne actuelle d'une colonne donnée. - -**Syntaxe** - -``` sql -neighbor(column, offset[, default_value]) -``` - -Le résultat de la fonction dépend du touché des blocs de données et l'ordre des données dans le bloc. -Si vous créez une sous-requête avec ORDER BY et appelez la fonction depuis l'extérieur de la sous-requête, vous pouvez obtenir le résultat attendu. - -**Paramètre** - -- `column` — A column name or scalar expression. -- `offset` — The number of rows forwards or backwards from the current row of `column`. [Int64](../../sql-reference/data-types/int-uint.md). -- `default_value` — Optional. The value to be returned if offset goes beyond the scope of the block. Type of data blocks affected. - -**Valeurs renvoyées** - -- De la valeur pour `column` dans `offset` distance de la ligne actuelle si `offset` la valeur n'est pas en dehors des limites du bloc. -- La valeur par défaut pour `column` si `offset` la valeur est en dehors des limites du bloc. Si `default_value` est donné, alors il sera utilisé. - -Type: type de blocs de données affectés ou type de valeur par défaut. 
- -**Exemple** - -Requête: - -``` sql -SELECT number, neighbor(number, 2) FROM system.numbers LIMIT 10; -``` - -Résultat: - -``` text -┌─number─┬─neighbor(number, 2)─┐ -│ 0 │ 2 │ -│ 1 │ 3 │ -│ 2 │ 4 │ -│ 3 │ 5 │ -│ 4 │ 6 │ -│ 5 │ 7 │ -│ 6 │ 8 │ -│ 7 │ 9 │ -│ 8 │ 0 │ -│ 9 │ 0 │ -└────────┴─────────────────────┘ -``` - -Requête: - -``` sql -SELECT number, neighbor(number, 2, 999) FROM system.numbers LIMIT 10; -``` - -Résultat: - -``` text -┌─number─┬─neighbor(number, 2, 999)─┐ -│ 0 │ 2 │ -│ 1 │ 3 │ -│ 2 │ 4 │ -│ 3 │ 5 │ -│ 4 │ 6 │ -│ 5 │ 7 │ -│ 6 │ 8 │ -│ 7 │ 9 │ -│ 8 │ 999 │ -│ 9 │ 999 │ -└────────┴──────────────────────────┘ -``` - -Cette fonction peut être utilisée pour calculer une année à valeur métrique: - -Requête: - -``` sql -WITH toDate('2018-01-01') AS start_date -SELECT - toStartOfMonth(start_date + (number * 32)) AS month, - toInt32(month) % 100 AS money, - neighbor(money, -12) AS prev_year, - round(prev_year / money, 2) AS year_over_year -FROM numbers(16) -``` - -Résultat: - -``` text -┌──────month─┬─money─┬─prev_year─┬─year_over_year─┐ -│ 2018-01-01 │ 32 │ 0 │ 0 │ -│ 2018-02-01 │ 63 │ 0 │ 0 │ -│ 2018-03-01 │ 91 │ 0 │ 0 │ -│ 2018-04-01 │ 22 │ 0 │ 0 │ -│ 2018-05-01 │ 52 │ 0 │ 0 │ -│ 2018-06-01 │ 83 │ 0 │ 0 │ -│ 2018-07-01 │ 13 │ 0 │ 0 │ -│ 2018-08-01 │ 44 │ 0 │ 0 │ -│ 2018-09-01 │ 75 │ 0 │ 0 │ -│ 2018-10-01 │ 5 │ 0 │ 0 │ -│ 2018-11-01 │ 36 │ 0 │ 0 │ -│ 2018-12-01 │ 66 │ 0 │ 0 │ -│ 2019-01-01 │ 97 │ 32 │ 0.33 │ -│ 2019-02-01 │ 28 │ 63 │ 2.25 │ -│ 2019-03-01 │ 56 │ 91 │ 1.62 │ -│ 2019-04-01 │ 87 │ 22 │ 0.25 │ -└────────────┴───────┴───────────┴────────────────┘ -``` - -## runningDifference(x) {#other_functions-runningdifference} - -Calculates the difference between successive row values ​​in the data block. -Renvoie 0 pour la première ligne et la différence par rapport à la rangée précédente pour chaque nouvelle ligne. - -Le résultat de la fonction dépend du touché des blocs de données et l'ordre des données dans le bloc. -Si vous créez une sous-requête avec ORDER BY et appelez la fonction depuis l'extérieur de la sous-requête, vous pouvez obtenir le résultat attendu. - -Exemple: - -``` sql -SELECT - EventID, - EventTime, - runningDifference(EventTime) AS delta -FROM -( - SELECT - EventID, - EventTime - FROM events - WHERE EventDate = '2016-11-24' - ORDER BY EventTime ASC - LIMIT 5 -) -``` - -``` text -┌─EventID─┬───────────EventTime─┬─delta─┐ -│ 1106 │ 2016-11-24 00:00:04 │ 0 │ -│ 1107 │ 2016-11-24 00:00:05 │ 1 │ -│ 1108 │ 2016-11-24 00:00:05 │ 0 │ -│ 1109 │ 2016-11-24 00:00:09 │ 4 │ -│ 1110 │ 2016-11-24 00:00:10 │ 1 │ -└─────────┴─────────────────────┴───────┘ -``` - -Veuillez noter que la taille du bloc affecte le résultat. Avec chaque nouveau bloc, le `runningDifference` l'état est réinitialisé. - -``` sql -SELECT - number, - runningDifference(number + 1) AS diff -FROM numbers(100000) -WHERE diff != 1 -``` - -``` text -┌─number─┬─diff─┐ -│ 0 │ 0 │ -└────────┴──────┘ -┌─number─┬─diff─┐ -│ 65536 │ 0 │ -└────────┴──────┘ -``` - -``` sql -set max_block_size=100000 -- default value is 65536! 
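--- Avec max_block_size = 100000, toutes les lignes de numbers(100000) tiennent dans un seul bloc, 
--- l'état de runningDifference n'est donc jamais réinitialisé en cours de route. 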
- -SELECT - number, - runningDifference(number + 1) AS diff -FROM numbers(100000) -WHERE diff != 1 -``` - -``` text -┌─number─┬─diff─┐ -│ 0 │ 0 │ -└────────┴──────┘ -``` - -## runningDifferenceStartingWithFirstvalue {#runningdifferencestartingwithfirstvalue} - -De même que pour [runningDifference](./other-functions.md#other_functions-runningdifference) la différence est la valeur de la première ligne, est retourné à la valeur de la première ligne, et chaque rangée suivante renvoie la différence de la rangée précédente. - -## MACNumToString (num) {#macnumtostringnum} - -Accepte un numéro UInt64. Interprète comme une adresse MAC dans big endian. Renvoie une chaîne contenant l'adresse MAC correspondante au format AA:BB:CC: DD:EE: FF (Nombres séparés par deux points sous forme hexadécimale). - -## MACStringToNum (s) {#macstringtonums} - -La fonction inverse de MACNumToString. Si l'adresse MAC a un format non valide, elle renvoie 0. - -## MACStringToOUI (s) {#macstringtoouis} - -Accepte une adresse MAC au format AA:BB:CC: DD:EE: FF (Nombres séparés par deux points sous forme hexadécimale). Renvoie les trois premiers octets sous la forme D'un nombre UInt64. Si l'adresse MAC a un format non valide, elle renvoie 0. - -## getSizeOfEnumType {#getsizeofenumtype} - -Retourne le nombre de champs dans [Enum](../../sql-reference/data-types/enum.md). - -``` sql -getSizeOfEnumType(value) -``` - -**Paramètre:** - -- `value` — Value of type `Enum`. - -**Valeurs renvoyées** - -- Le nombre de champs avec `Enum` les valeurs d'entrée. -- Une exception est levée si le type n'est pas `Enum`. - -**Exemple** - -``` sql -SELECT getSizeOfEnumType( CAST('a' AS Enum8('a' = 1, 'b' = 2) ) ) AS x -``` - -``` text -┌─x─┐ -│ 2 │ -└───┘ -``` - -## blockSerializedSize {#blockserializedsize} - -Retourne la taille sur le disque (sans tenir compte de la compression). - -``` sql -blockSerializedSize(value[, value[, ...]]) -``` - -**Paramètre:** - -- `value` — Any value. - -**Valeurs renvoyées** - -- Le nombre d'octets qui seront écrites sur le disque pour le bloc de valeurs (sans compression). - -**Exemple** - -``` sql -SELECT blockSerializedSize(maxState(1)) as x -``` - -``` text -┌─x─┐ -│ 2 │ -└───┘ -``` - -## toColumnTypeName {#tocolumntypename} - -Renvoie le nom de la classe qui représente le type de données de la colonne dans la RAM. - -``` sql -toColumnTypeName(value) -``` - -**Paramètre:** - -- `value` — Any type of value. - -**Valeurs renvoyées** - -- Une chaîne avec le nom de la classe utilisée pour représenter `value` type de données dans la mémoire RAM. - -**Exemple de la différence entre`toTypeName ' and ' toColumnTypeName`** - -``` sql -SELECT toTypeName(CAST('2018-01-01 01:02:03' AS DateTime)) -``` - -``` text -┌─toTypeName(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ -│ DateTime │ -└─────────────────────────────────────────────────────┘ -``` - -``` sql -SELECT toColumnTypeName(CAST('2018-01-01 01:02:03' AS DateTime)) -``` - -``` text -┌─toColumnTypeName(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ -│ Const(UInt32) │ -└───────────────────────────────────────────────────────────┘ -``` - -L'exemple montre que le `DateTime` type de données est stocké dans la mémoire comme `Const(UInt32)`. - -## dumpColumnStructure {#dumpcolumnstructure} - -Affiche une description détaillée des structures de données en RAM - -``` sql -dumpColumnStructure(value) -``` - -**Paramètre:** - -- `value` — Any type of value. 
- -**Valeurs renvoyées** - -- Une chaîne décrivant la structure utilisée pour représenter `value` type de données dans la mémoire RAM. - -**Exemple** - -``` sql -SELECT dumpColumnStructure(CAST('2018-01-01 01:02:03', 'DateTime')) -``` - -``` text -┌─dumpColumnStructure(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ -│ DateTime, Const(size = 1, UInt32(size = 1)) │ -└──────────────────────────────────────────────────────────────┘ -``` - -## defaultValueOfArgumentType {#defaultvalueofargumenttype} - -Affiche la valeur par défaut du type de données. - -Ne pas inclure des valeurs par défaut pour les colonnes personnalisées définies par l'utilisateur. - -``` sql -defaultValueOfArgumentType(expression) -``` - -**Paramètre:** - -- `expression` — Arbitrary type of value or an expression that results in a value of an arbitrary type. - -**Valeurs renvoyées** - -- `0` pour les nombres. -- Chaîne vide pour les chaînes. -- `ᴺᵁᴸᴸ` pour [Nullable](../../sql-reference/data-types/nullable.md). - -**Exemple** - -``` sql -SELECT defaultValueOfArgumentType( CAST(1 AS Int8) ) -``` - -``` text -┌─defaultValueOfArgumentType(CAST(1, 'Int8'))─┐ -│ 0 │ -└─────────────────────────────────────────────┘ -``` - -``` sql -SELECT defaultValueOfArgumentType( CAST(1 AS Nullable(Int8) ) ) -``` - -``` text -┌─defaultValueOfArgumentType(CAST(1, 'Nullable(Int8)'))─┐ -│ ᴺᵁᴸᴸ │ -└───────────────────────────────────────────────────────┘ -``` - -## reproduire {#other-functions-replicate} - -Crée un tableau avec une seule valeur. - -Utilisé pour la mise en œuvre interne de [arrayJoin](array-join.md#functions_arrayjoin). - -``` sql -SELECT replicate(x, arr); -``` - -**Paramètre:** - -- `arr` — Original array. ClickHouse creates a new array of the same length as the original and fills it with the value `x`. -- `x` — The value that the resulting array will be filled with. - -**Valeur renvoyée** - -Un tableau rempli de la valeur `x`. - -Type: `Array`. - -**Exemple** - -Requête: - -``` sql -SELECT replicate(1, ['a', 'b', 'c']) -``` - -Résultat: - -``` text -┌─replicate(1, ['a', 'b', 'c'])─┐ -│ [1,1,1] │ -└───────────────────────────────┘ -``` - -## filesystemAvailable {#filesystemavailable} - -Renvoie la quantité d'espace restant sur le système de fichiers où se trouvent les fichiers des bases de données. Il est toujours plus petit que l'espace libre total ([filesystemFree](#filesystemfree)) parce qu'un peu d'espace est réservé au système D'exploitation. - -**Syntaxe** - -``` sql -filesystemAvailable() -``` - -**Valeur renvoyée** - -- La quantité d'espace restant disponible en octets. - -Type: [UInt64](../../sql-reference/data-types/int-uint.md). - -**Exemple** - -Requête: - -``` sql -SELECT formatReadableSize(filesystemAvailable()) AS "Available space", toTypeName(filesystemAvailable()) AS "Type"; -``` - -Résultat: - -``` text -┌─Available space─┬─Type───┐ -│ 30.75 GiB │ UInt64 │ -└─────────────────┴────────┘ -``` - -## filesystemFree {#filesystemfree} - -Retourne montant total de l'espace libre sur le système de fichiers où les fichiers des bases de données. Voir aussi `filesystemAvailable` - -**Syntaxe** - -``` sql -filesystemFree() -``` - -**Valeur renvoyée** - -- Quantité d'espace libre en octets. - -Type: [UInt64](../../sql-reference/data-types/int-uint.md). 
- -**Exemple** - -Requête: - -``` sql -SELECT formatReadableSize(filesystemFree()) AS "Free space", toTypeName(filesystemFree()) AS "Type"; -``` - -Résultat: - -``` text -┌─Free space─┬─Type───┐ -│ 32.39 GiB │ UInt64 │ -└────────────┴────────┘ -``` - -## filesystemCapacity {#filesystemcapacity} - -Renvoie la capacité du système de fichiers en octets. Pour l'évaluation, la [chemin](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-path) le répertoire de données doit être configuré. - -**Syntaxe** - -``` sql -filesystemCapacity() -``` - -**Valeur renvoyée** - -- Informations de capacité du système de fichiers en octets. - -Type: [UInt64](../../sql-reference/data-types/int-uint.md). - -**Exemple** - -Requête: - -``` sql -SELECT formatReadableSize(filesystemCapacity()) AS "Capacity", toTypeName(filesystemCapacity()) AS "Type" -``` - -Résultat: - -``` text -┌─Capacity──┬─Type───┐ -│ 39.32 GiB │ UInt64 │ -└───────────┴────────┘ -``` - -## finalizeAggregation {#function-finalizeaggregation} - -Prend de l'état de la fonction d'agrégation. Renvoie le résultat de l'agrégation (état finalisé). - -## runningAccumulate {#function-runningaccumulate} - -Prend les membres de la fonction d'agrégation et renvoie une colonne avec des valeurs, sont le résultat de l'accumulation de ces états pour un ensemble de bloc de lignes, de la première à la ligne actuelle. -Par exemple, prend l'état de la fonction d'agrégat (exemple runningAccumulate(uniqState(UserID))), et pour chaque ligne de bloc, retourne le résultat de la fonction d'agrégat lors de la fusion des états de toutes les lignes précédentes et de la ligne actuelle. -Ainsi, le résultat de la fonction dépend de la partition des données aux blocs et de l'ordre des données dans le bloc. - -## joinGet {#joinget} - -La fonction vous permet d'extraire les données de la table de la même manière qu'à partir d'un [dictionnaire](../../sql-reference/dictionaries/index.md). - -Obtient les données de [Rejoindre](../../engines/table-engines/special/join.md#creating-a-table) tables utilisant la clé de jointure spécifiée. - -Ne prend en charge que les tables créées avec `ENGINE = Join(ANY, LEFT, )` déclaration. - -**Syntaxe** - -``` sql -joinGet(join_storage_table_name, `value_column`, join_keys) -``` - -**Paramètre** - -- `join_storage_table_name` — an [identificateur](../syntax.md#syntax-identifiers) indique l'endroit où la recherche est effectuée. L'identificateur est recherché dans la base de données par défaut (voir paramètre `default_database` dans le fichier de config). Pour remplacer la base de données par défaut, utilisez `USE db_name` ou spécifiez la base de données et la table via le séparateur `db_name.db_table` voir l'exemple. -- `value_column` — name of the column of the table that contains required data. -- `join_keys` — list of keys. - -**Valeur renvoyée** - -Retourne la liste des valeurs correspond à la liste des clés. - -Si certain n'existe pas dans la table source alors `0` ou `null` seront renvoyés basé sur [join_use_nulls](../../operations/settings/settings.md#join_use_nulls) paramètre. - -Plus d'infos sur `join_use_nulls` dans [Opération de jointure](../../engines/table-engines/special/join.md). 
- -**Exemple** - -Table d'entrée: - -``` sql -CREATE DATABASE db_test -CREATE TABLE db_test.id_val(`id` UInt32, `val` UInt32) ENGINE = Join(ANY, LEFT, id) SETTINGS join_use_nulls = 1 -INSERT INTO db_test.id_val VALUES (1,11)(2,12)(4,13) -``` - -``` text -┌─id─┬─val─┐ -│ 4 │ 13 │ -│ 2 │ 12 │ -│ 1 │ 11 │ -└────┴─────┘ -``` - -Requête: - -``` sql -SELECT joinGet(db_test.id_val,'val',toUInt32(number)) from numbers(4) SETTINGS join_use_nulls = 1 -``` - -Résultat: - -``` text -┌─joinGet(db_test.id_val, 'val', toUInt32(number))─┐ -│ 0 │ -│ 11 │ -│ 12 │ -│ 0 │ -└──────────────────────────────────────────────────┘ -``` - -## modelEvaluate(model_name, …) {#function-modelevaluate} - -Évaluer le modèle externe. -Accepte un nom de modèle et le modèle de l'argumentation. Renvoie Float64. - -## throwIf (x \[, custom_message\]) {#throwifx-custom-message} - -Lever une exception si l'argument est non nul. -custom_message - est un paramètre optionnel: une chaîne constante, fournit un message d'erreur - -``` sql -SELECT throwIf(number = 3, 'Too many') FROM numbers(10); -``` - -``` text -↙ Progress: 0.00 rows, 0.00 B (0.00 rows/s., 0.00 B/s.) Received exception from server (version 19.14.1): -Code: 395. DB::Exception: Received from localhost:9000. DB::Exception: Too many. -``` - -## identité {#identity} - -Renvoie la même valeur qui a été utilisée comme argument. Utilisé pour le débogage et les tests, permet d'annuler l'utilisation de l'index et d'obtenir les performances de requête d'une analyse complète. Lorsque la requête est analysée pour une utilisation possible de l'index, l'analyseur ne regarde pas à l'intérieur `identity` fonction. - -**Syntaxe** - -``` sql -identity(x) -``` - -**Exemple** - -Requête: - -``` sql -SELECT identity(42) -``` - -Résultat: - -``` text -┌─identity(42)─┐ -│ 42 │ -└──────────────┘ -``` - -## randomPrintableASCII {#randomascii} - -Génère une chaîne avec un ensemble aléatoire de [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters) caractères imprimables. - -**Syntaxe** - -``` sql -randomPrintableASCII(length) -``` - -**Paramètre** - -- `length` — Resulting string length. Positive integer. - - If you pass `length < 0`, behavior of the function is undefined. - -**Valeur renvoyée** - -- Chaîne avec un ensemble aléatoire de [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters) caractères imprimables. 
- -Type: [Chaîne](../../sql-reference/data-types/string.md) - -**Exemple** - -``` sql -SELECT number, randomPrintableASCII(30) as str, length(str) FROM system.numbers LIMIT 3 -``` - -``` text -┌─number─┬─str────────────────────────────┬─length(randomPrintableASCII(30))─┐ -│ 0 │ SuiCOSTvC0csfABSw=UcSzp2.`rv8x │ 30 │ -│ 1 │ 1Ag NlJ &RCN:*>HVPG;PE-nO"SUFD │ 30 │ -│ 2 │ /"+<"wUTh:=LjJ Vm!c&hI*m#XTfzz │ 30 │ -└────────┴────────────────────────────────┴──────────────────────────────────┘ -``` - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/other_functions/) diff --git a/docs/fr/sql-reference/functions/random-functions.md b/docs/fr/sql-reference/functions/random-functions.md deleted file mode 100644 index 3c4e15507bb..00000000000 --- a/docs/fr/sql-reference/functions/random-functions.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 51 -toc_title: "La G\xE9n\xE9ration De Nombres Pseudo-Al\xE9atoires" ---- - -# Fonctions pour générer des nombres Pseudo-aléatoires {#functions-for-generating-pseudo-random-numbers} - -Des générateurs Non cryptographiques de nombres pseudo-aléatoires sont utilisés. - -Toutes les fonctions acceptent zéro argument ou un argument. -Si un argument est passé, il peut être de n'importe quel type, et sa valeur n'est utilisée pour rien. -Le seul but de cet argument est d'empêcher l'élimination des sous-expressions courantes, de sorte que deux instances différentes de la même fonction renvoient des colonnes différentes avec des nombres aléatoires différents. - -## Rand {#rand} - -Renvoie un nombre UInt32 pseudo-aléatoire, réparti uniformément entre tous les nombres de type UInt32. -Utilise un générateur congruentiel linéaire. - -## rand64 {#rand64} - -Renvoie un nombre UInt64 pseudo-aléatoire, réparti uniformément entre tous les nombres de type UInt64. -Utilise un générateur congruentiel linéaire. - -## randConstant {#randconstant} - -Produit une colonne constante avec une valeur aléatoire. - -**Syntaxe** - -``` sql -randConstant([x]) -``` - -**Paramètre** - -- `x` — [Expression](../syntax.md#syntax-expressions) résultant de la [types de données pris en charge](../data-types/index.md#data_types). La valeur résultante est ignorée, mais l'expression elle-même si elle est utilisée pour contourner [élimination des sous-expressions courantes](index.md#common-subexpression-elimination) si la fonction est appelée plusieurs fois dans une seule requête. Paramètre facultatif. - -**Valeur renvoyée** - -- Nombre Pseudo-aléatoire. - -Type: [UInt32](../data-types/int-uint.md). 
- -**Exemple** - -Requête: - -``` sql -SELECT rand(), rand(1), rand(number), randConstant(), randConstant(1), randConstant(number) -FROM numbers(3) -``` - -Résultat: - -``` text -┌─────rand()─┬────rand(1)─┬─rand(number)─┬─randConstant()─┬─randConstant(1)─┬─randConstant(number)─┐ -│ 3047369878 │ 4132449925 │ 4044508545 │ 2740811946 │ 4229401477 │ 1924032898 │ -│ 2938880146 │ 1267722397 │ 4154983056 │ 2740811946 │ 4229401477 │ 1924032898 │ -│ 956619638 │ 4238287282 │ 1104342490 │ 2740811946 │ 4229401477 │ 1924032898 │ -└────────────┴────────────┴──────────────┴────────────────┴─────────────────┴──────────────────────┘ -``` - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/random_functions/) diff --git a/docs/fr/sql-reference/functions/rounding-functions.md b/docs/fr/sql-reference/functions/rounding-functions.md deleted file mode 100644 index f99e6358026..00000000000 --- a/docs/fr/sql-reference/functions/rounding-functions.md +++ /dev/null @@ -1,190 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 45 -toc_title: Arrondi ---- - -# Fonctions D'Arrondi {#rounding-functions} - -## floor(x\[, N\]) {#floorx-n} - -Renvoie le plus grand nombre rond inférieur ou égal à `x`. Un nombre rond est un multiple de 1 / 10N, ou le nombre le plus proche du type de données approprié si 1 / 10N n'est pas exact. -‘N’ est une constante entière, paramètre facultatif. Par défaut, il est zéro, ce qui signifie arrondir à un entier. -‘N’ peut être négative. - -Exemple: `floor(123.45, 1) = 123.4, floor(123.45, -1) = 120.` - -`x` est n'importe quel type numérique. Le résultat est un nombre du même type. -Pour les arguments entiers, il est logique d'arrondir avec un négatif `N` valeur (pour non négatif `N`, la fonction ne fait rien). -Si l'arrondi provoque un débordement (par exemple, floor(-128, -1)), un résultat spécifique à l'implémentation est renvoyé. - -## ceil(x\[, n\]), plafond (x\[, n\]) {#ceilx-n-ceilingx-n} - -Renvoie le plus petit nombre rond supérieur ou égal à `x`. Dans tous les autres sens, il est le même que le `floor` fonction (voir ci-dessus). - -## trunc(x \[, N\]), truncate(x \[, N\]) {#truncx-n-truncatex-n} - -Renvoie le nombre rond avec la plus grande valeur absolue qui a une valeur absolue inférieure ou égale à `x`‘s. In every other way, it is the same as the ’floor’ fonction (voir ci-dessus). - -## round(x\[, N\]) {#rounding_functions-round} - -Arrondit une valeur à un nombre spécifié de décimales. - -La fonction renvoie le nombre plus proche de l'ordre spécifié. Dans le cas où un nombre donné a une distance égale aux nombres environnants, la fonction utilise l'arrondi de banquier pour les types de nombres flottants et arrondit à partir de zéro pour les autres types de nombres. - -``` sql -round(expression [, decimal_places]) -``` - -**Paramètre:** - -- `expression` — A number to be rounded. Can be any [expression](../syntax.md#syntax-expressions) retour du numérique [type de données](../../sql-reference/data-types/index.md#data_types). -- `decimal-places` — An integer value. - - Si `decimal-places > 0` alors la fonction arrondit la valeur à droite du point décimal. - - Si `decimal-places < 0` alors la fonction arrondit la valeur à gauche de la virgule décimale. - - Si `decimal-places = 0` alors la fonction arrondit la valeur à l'entier. Dans ce cas, l'argument peut être omis. - -**Valeur renvoyée:** - -Le nombre arrondi du même type que le nombre d'entrée. 
- -### Exemple {#examples} - -**Exemple d'utilisation** - -``` sql -SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3 -``` - -``` text -┌───x─┬─round(divide(number, 2))─┐ -│ 0 │ 0 │ -│ 0.5 │ 0 │ -│ 1 │ 1 │ -└─────┴──────────────────────────┘ -``` - -**Des exemples de l'arrondissement** - -Le résultat est arrondi au plus proche. - -``` text -round(3.2, 0) = 3 -round(4.1267, 2) = 4.13 -round(22,-1) = 20 -round(467,-2) = 500 -round(-467,-2) = -500 -``` - -Le Banquier arrondit. - -``` text -round(3.5) = 4 -round(4.5) = 4 -round(3.55, 1) = 3.6 -round(3.65, 1) = 3.6 -``` - -**Voir Aussi** - -- [roundBankers](#roundbankers) - -## roundBankers {#roundbankers} - -Arrondit un nombre à une position décimale spécifiée. - -- Si le nombre est arrondi à mi-chemin entre deux nombres, la fonction utilise l'arrondi. - - Banker's rounding is a method of rounding fractional numbers. When the rounding number is halfway between two numbers, it's rounded to the nearest even digit at the specified decimal position. For example: 3.5 rounds up to 4, 2.5 rounds down to 2. - - It's the default rounding method for floating point numbers defined in [IEEE 754](https://en.wikipedia.org/wiki/IEEE_754#Roundings_to_nearest). The [round](#rounding_functions-round) function performs the same rounding for floating point numbers. The `roundBankers` function also rounds integers the same way, for example, `roundBankers(45, -1) = 40`. - -- Dans d'autres cas, la fonction arrondit les nombres à l'entier le plus proche. - -À l'aide de l'arrondi, vous pouvez réduire l'effet qu'arrondir les nombres sur les résultats d'additionner ou de soustraire ces chiffres. - -Par exemple, les nombres de somme 1.5, 2.5, 3.5, 4.5 avec des arrondis différents: - -- Pas d'arrondi: 1.5 + 2.5 + 3.5 + 4.5 = 12. -- Arrondi du banquier: 2 + 2 + 4 + 4 = 12. -- Arrondi à l'entier le plus proche: 2 + 3 + 4 + 5 = 14. - -**Syntaxe** - -``` sql -roundBankers(expression [, decimal_places]) -``` - -**Paramètre** - -- `expression` — A number to be rounded. Can be any [expression](../syntax.md#syntax-expressions) retour du numérique [type de données](../../sql-reference/data-types/index.md#data_types). -- `decimal-places` — Decimal places. An integer number. - - `decimal-places > 0` — The function rounds the number to the given position right of the decimal point. Example: `roundBankers(3.55, 1) = 3.6`. - - `decimal-places < 0` — The function rounds the number to the given position left of the decimal point. Example: `roundBankers(24.55, -1) = 20`. - - `decimal-places = 0` — The function rounds the number to an integer. In this case the argument can be omitted. Example: `roundBankers(2.5) = 2`. - -**Valeur renvoyée** - -Valeur arrondie par la méthode d'arrondi du banquier. - -### Exemple {#examples-1} - -**Exemple d'utilisation** - -Requête: - -``` sql - SELECT number / 2 AS x, roundBankers(x, 0) AS b fROM system.numbers limit 10 -``` - -Résultat: - -``` text -┌───x─┬─b─┐ -│ 0 │ 0 │ -│ 0.5 │ 0 │ -│ 1 │ 1 │ -│ 1.5 │ 2 │ -│ 2 │ 2 │ -│ 2.5 │ 2 │ -│ 3 │ 3 │ -│ 3.5 │ 4 │ -│ 4 │ 4 │ -│ 4.5 │ 4 │ -└─────┴───┘ -``` - -**Exemples d'arrondi bancaire** - -``` text -roundBankers(0.4) = 0 -roundBankers(-3.5) = -4 -roundBankers(4.5) = 4 -roundBankers(3.55, 1) = 3.6 -roundBankers(3.65, 1) = 3.6 -roundBankers(10.35, 1) = 10.4 -roundBankers(10.755, 2) = 11,76 -``` - -**Voir Aussi** - -- [rond](#rounding_functions-round) - -## roundToExp2 (num) {#roundtoexp2num} - -Accepte un certain nombre. Si le nombre est inférieur à un, elle renvoie 0. 
Sinon, il arrondit le nombre au degré le plus proche (entier non négatif) de deux. - -## roundDuration (num) {#rounddurationnum} - -Accepte un certain nombre. Si le nombre est inférieur à un, elle renvoie 0. Sinon, il arrondit le nombre vers le bas pour les nombres de l'ensemble: 1, 10, 30, 60, 120, 180, 240, 300, 600, 1200, 1800, 3600, 7200, 18000, 36000. Cette fonction est spécifique à Yandex.Metrica et utilisé pour la mise en œuvre du rapport sur la durée de la session. - -## roundAge (num) {#roundagenum} - -Accepte un certain nombre. Si le nombre est inférieur à 18, il renvoie 0. Sinon, il arrondit le nombre à un nombre de l'ensemble: 18, 25, 35, 45, 55. Cette fonction est spécifique à Yandex.Metrica et utilisé pour la mise en œuvre du rapport sur l'âge des utilisateurs. - -## roundDown(num, arr) {#rounddownnum-arr} - -Accepte un nombre et l'arrondit à un élément dans le tableau spécifié. Si la valeur est inférieure à la plus basse, la plus basse lié est retourné. - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/rounding_functions/) diff --git a/docs/fr/sql-reference/functions/splitting-merging-functions.md b/docs/fr/sql-reference/functions/splitting-merging-functions.md deleted file mode 100644 index a1260e918b0..00000000000 --- a/docs/fr/sql-reference/functions/splitting-merging-functions.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 47 -toc_title: "Fractionnement et fusion de cha\xEEnes et de tableaux" ---- - -# Fonctions pour diviser et fusionner des chaînes et des tableaux {#functions-for-splitting-and-merging-strings-and-arrays} - -## splitByChar (séparateur, s) {#splitbycharseparator-s} - -Divise une chaîne en sous-chaînes séparées par un caractère spécifique. Il utilise une chaîne constante `separator` qui composé d'un seul caractère. -Retourne un tableau de certaines chaînes. Les sous-chaînes vides peuvent être sélectionnées si le séparateur se produit au début ou à la fin de la chaîne, ou s'il existe plusieurs séparateurs consécutifs. - -**Syntaxe** - -``` sql -splitByChar(, ) -``` - -**Paramètre** - -- `separator` — The separator which should contain exactly one character. [Chaîne](../../sql-reference/data-types/string.md). -- `s` — The string to split. [Chaîne](../../sql-reference/data-types/string.md). - -**Valeur renvoyée(s)** - -Retourne un tableau de certaines chaînes. Des sous-chaînes vides peuvent être sélectionnées lorsque: - -- Un séparateur se produit au début ou à la fin de la chaîne; -- Il existe plusieurs séparateurs consécutifs; -- La chaîne d'origine `s` est vide. - -Type: [Tableau](../../sql-reference/data-types/array.md) de [Chaîne](../../sql-reference/data-types/string.md). - -**Exemple** - -``` sql -SELECT splitByChar(',', '1,2,3,abcde') -``` - -``` text -┌─splitByChar(',', '1,2,3,abcde')─┐ -│ ['1','2','3','abcde'] │ -└─────────────────────────────────┘ -``` - -## splitByString(séparateur, s) {#splitbystringseparator-s} - -Divise une chaîne en sous-chaînes séparées par une chaîne. Il utilise une chaîne constante `separator` de plusieurs caractères comme séparateur. Si la chaîne `separator` est vide, il va diviser la chaîne `s` dans un tableau de caractères uniques. - -**Syntaxe** - -``` sql -splitByString(, ) -``` - -**Paramètre** - -- `separator` — The separator. [Chaîne](../../sql-reference/data-types/string.md). -- `s` — The string to split. [Chaîne](../../sql-reference/data-types/string.md). 
- 
-**Valeur renvoyée(s)** 
- 
-Retourne un tableau de sous-chaînes sélectionnées. Des sous-chaînes vides peuvent être sélectionnées lorsque: 
- 
-- Un séparateur non vide se produit au début ou à la fin de la chaîne; 
-- Il existe plusieurs séparateurs consécutifs non vides; 
-- La chaîne d'origine `s` est vide tandis que le séparateur n'est pas vide. 
- 
-Type: [Tableau](../../sql-reference/data-types/array.md) de [Chaîne](../../sql-reference/data-types/string.md). 
- 
-**Exemple** 
- 
-``` sql 
-SELECT splitByString(', ', '1, 2 3, 4,5, abcde') 
-``` 
- 
-``` text 
-┌─splitByString(', ', '1, 2 3, 4,5, abcde')─┐ 
-│ ['1','2 3','4,5','abcde'] │ 
-└───────────────────────────────────────────┘ 
-``` 
- 
-``` sql 
-SELECT splitByString('', 'abcde') 
-``` 
- 
-``` text 
-┌─splitByString('', 'abcde')─┐ 
-│ ['a','b','c','d','e'] │ 
-└────────────────────────────┘ 
-``` 
- 
-## arrayStringConcat(arr \[, séparateur\]) {#arraystringconcatarr-separator} 
- 
-Concatène les chaînes répertoriées dans le tableau avec le séparateur. `separator` est un paramètre facultatif: une chaîne constante, égale à une chaîne vide par défaut. 
-Retourne une chaîne de caractères. 
- 
-## alphaTokens (s) {#alphatokenss} 
- 
-Sélectionne des sous-chaînes d'octets consécutifs dans les plages a-z et A-Z. Retourne un tableau de sous-chaînes. 
- 
-**Exemple** 
- 
-``` sql 
-SELECT alphaTokens('abca1abc') 
-``` 
- 
-``` text 
-┌─alphaTokens('abca1abc')─┐ 
-│ ['abca','abc'] │ 
-└─────────────────────────┘ 
-``` 
- 
-[Article Original](https://clickhouse.tech/docs/en/query_language/functions/splitting_merging_functions/) 
diff --git a/docs/fr/sql-reference/functions/string-functions.md b/docs/fr/sql-reference/functions/string-functions.md 
deleted file mode 100644 
index 1482952426c..00000000000 
--- a/docs/fr/sql-reference/functions/string-functions.md 
+++ /dev/null 
@@ -1,489 +0,0 @@ 
---- 
-machine_translated: true 
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd 
-toc_priority: 40 
-toc_title: "Travailler avec des cha\xEEnes" 
--- 
- 
-# Fonctions pour travailler avec des chaînes {#functions-for-working-with-strings} 
- 
-## vide {#empty} 
- 
-Renvoie 1 pour une chaîne vide ou 0 pour une chaîne non vide. 
-Le type de résultat est UInt8. 
-Une chaîne est considérée comme non vide si elle contient au moins un octet, même s'il s'agit d'un espace ou d'un octet nul. 
-La fonction fonctionne également pour les tableaux. 
- 
-## notEmpty {#notempty} 
- 
-Renvoie 0 pour une chaîne vide ou 1 pour une chaîne non vide. 
-Le type de résultat est UInt8. 
-La fonction fonctionne également pour les tableaux. 
- 
-## longueur {#length} 
- 
-Renvoie la longueur d'une chaîne en octets (pas en caractères, ni en points de code). 
-Le type de résultat est UInt64. 
-La fonction fonctionne également pour les tableaux. 
- 
-## lengthUTF8 {#lengthutf8} 
- 
-Renvoie la longueur d'une chaîne en points de code Unicode (pas en caractères), en supposant que la chaîne contient un ensemble d'octets qui composent un texte codé en UTF-8. Si cette hypothèse n'est pas remplie, elle renvoie un résultat quelconque (elle ne lance pas d'exception). 
-Le type de résultat est UInt64. 
- 
-## char_length, CHAR_LENGTH {#char-length} 
- 
-Renvoie la longueur d'une chaîne en points de code Unicode (pas en caractères), en supposant que la chaîne contient un ensemble d'octets qui composent un texte codé en UTF-8. Si cette hypothèse n'est pas remplie, elle renvoie un résultat quelconque (elle ne lance pas d'exception). 
-Le type de résultat est UInt64. 
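- 
-Par exemple, la différence entre la longueur en octets (`length`) et en points de code (`lengthUTF8`): 
- 
-``` sql 
-SELECT length('été') AS octets, lengthUTF8('été') AS points_de_code 
-``` 
- 
-``` text 
-┌─octets─┬─points_de_code─┐ 
-│      5 │              3 │ 
-└────────┴────────────────┘ 
-``` 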
- -## character_length, CHARACTER_LENGTH {#character-length} - -Renvoie la longueur d'une chaîne en points de code Unicode (pas en caractères), en supposant que la chaîne contient un ensemble d'octets qui composent le texte codé en UTF-8. Si cette hypothèse n'est pas remplie, elle renvoie un résultat (elle ne lance pas d'exception). -Le type de résultat est UInt64. - -## plus bas, lcase {#lower} - -Convertit les symboles latins ASCII dans une chaîne en minuscules. - -## supérieur, ucase {#upper} - -Convertit les symboles latins ASCII dans une chaîne en majuscules. - -## lowerUTF8 {#lowerutf8} - -Convertit une chaîne en minuscules, en supposant que la chaîne de caractères contient un ensemble d'octets qui composent un texte UTF-8. -Il ne détecte pas la langue. Donc, pour le turc, le résultat pourrait ne pas être exactement correct. -Si la longueur de la séquence d'octets UTF-8 est différente pour les majuscules et les minuscules d'un point de code, le résultat peut être incorrect pour ce point de code. -Si la chaîne contient un ensemble d'octets qui N'est pas UTF-8, le comportement n'est pas défini. - -## upperUTF8 {#upperutf8} - -Convertit une chaîne en majuscules, en supposant que la chaîne de caractères contient un ensemble d'octets qui composent un texte UTF-8. -Il ne détecte pas la langue. Donc, pour le turc, le résultat pourrait ne pas être exactement correct. -Si la longueur de la séquence d'octets UTF-8 est différente pour les majuscules et les minuscules d'un point de code, le résultat peut être incorrect pour ce point de code. -Si la chaîne contient un ensemble d'octets qui N'est pas UTF-8, le comportement n'est pas défini. - -## isValidUTF8 {#isvalidutf8} - -Renvoie 1, si l'ensemble d'octets est codé en UTF-8 valide, sinon 0. - -## toValidUTF8 {#tovalidutf8} - -Remplace les caractères UTF-8 non valides par `�` (U+FFFD) caractère. Tous les caractères non valides s'exécutant dans une rangée sont réduits en un seul caractère de remplacement. - -``` sql -toValidUTF8( input_string ) -``` - -Paramètre: - -- input_string — Any set of bytes represented as the [Chaîne](../../sql-reference/data-types/string.md) type de données objet. - -Valeur renvoyée: chaîne UTF-8 valide. - -**Exemple** - -``` sql -SELECT toValidUTF8('\x61\xF0\x80\x80\x80b') -``` - -``` text -┌─toValidUTF8('a����b')─┐ -│ a�b │ -└───────────────────────┘ -``` - -## répéter {#repeat} - -Répète une corde autant de fois que spécifié et concatène les valeurs répliquées comme une seule chaîne. - -**Syntaxe** - -``` sql -repeat(s, n) -``` - -**Paramètre** - -- `s` — The string to repeat. [Chaîne](../../sql-reference/data-types/string.md). -- `n` — The number of times to repeat the string. [UInt](../../sql-reference/data-types/int-uint.md). - -**Valeur renvoyée** - -La chaîne unique, qui contient la chaîne `s` répéter `n` temps. Si `n` \< 1, la fonction renvoie une chaîne vide. - -Type: `String`. - -**Exemple** - -Requête: - -``` sql -SELECT repeat('abc', 10) -``` - -Résultat: - -``` text -┌─repeat('abc', 10)──────────────┐ -│ abcabcabcabcabcabcabcabcabcabc │ -└────────────────────────────────┘ -``` - -## inverser {#reverse} - -Inverse la chaîne (comme une séquence d'octets). - -## reverseUTF8 {#reverseutf8} - -Inverse une séquence de points de code Unicode, en supposant que la chaîne contient un ensemble d'octets représentant un texte UTF-8. Sinon, il fait autre chose (il ne lance pas d'exception). - -## format(pattern, s0, s1, …) {#format} - -Formatage du motif constant avec la chaîne listée dans les arguments. 
`pattern` est un modèle de format Python simplifié. Chaîne de Format contient “replacement fields” entouré par des accolades `{}`. Tout ce qui n'est pas contenu dans les accolades est considéré comme du texte littéral, qui est copié inchangé dans la sortie. Si vous devez inclure un caractère d'Accolade dans le texte littéral, il peut être échappé en doublant: `{{ '{{' }}` et `{{ '}}' }}`. Les noms de champs peuvent être des nombres (à partir de zéro) ou vides (ils sont alors traités comme des nombres de conséquence). - -``` sql -SELECT format('{1} {0} {1}', 'World', 'Hello') -``` - -``` text -┌─format('{1} {0} {1}', 'World', 'Hello')─┐ -│ Hello World Hello │ -└─────────────────────────────────────────┘ -``` - -``` sql -SELECT format('{} {}', 'Hello', 'World') -``` - -``` text -┌─format('{} {}', 'Hello', 'World')─┐ -│ Hello World │ -└───────────────────────────────────┘ -``` - -## concat {#concat} - -Concatène les chaînes répertoriées dans les arguments, sans séparateur. - -**Syntaxe** - -``` sql -concat(s1, s2, ...) -``` - -**Paramètre** - -Valeurs de type String ou FixedString. - -**Valeurs renvoyées** - -Renvoie la chaîne qui résulte de la concaténation des arguments. - -Si l'une des valeurs d'argument est `NULL`, `concat` retourner `NULL`. - -**Exemple** - -Requête: - -``` sql -SELECT concat('Hello, ', 'World!') -``` - -Résultat: - -``` text -┌─concat('Hello, ', 'World!')─┐ -│ Hello, World! │ -└─────────────────────────────┘ -``` - -## concatAssumeInjective {#concatassumeinjective} - -Même que [concat](#concat) la différence est que vous devez vous assurer que `concat(s1, s2, ...) → sn` est injectif, il sera utilisé pour l'optimisation du groupe par. - -La fonction est nommée “injective” si elle renvoie toujours un résultat différent pour différentes valeurs d'arguments. En d'autres termes: des arguments différents ne donnent jamais un résultat identique. - -**Syntaxe** - -``` sql -concatAssumeInjective(s1, s2, ...) -``` - -**Paramètre** - -Valeurs de type String ou FixedString. - -**Valeurs renvoyées** - -Renvoie la chaîne qui résulte de la concaténation des arguments. - -Si l'une des valeurs d'argument est `NULL`, `concatAssumeInjective` retourner `NULL`. - -**Exemple** - -Table d'entrée: - -``` sql -CREATE TABLE key_val(`key1` String, `key2` String, `value` UInt32) ENGINE = TinyLog; -INSERT INTO key_val VALUES ('Hello, ','World',1), ('Hello, ','World',2), ('Hello, ','World!',3), ('Hello',', World!',2); -SELECT * from key_val; -``` - -``` text -┌─key1────┬─key2─────┬─value─┐ -│ Hello, │ World │ 1 │ -│ Hello, │ World │ 2 │ -│ Hello, │ World! │ 3 │ -│ Hello │ , World! │ 2 │ -└─────────┴──────────┴───────┘ -``` - -Requête: - -``` sql -SELECT concat(key1, key2), sum(value) FROM key_val GROUP BY concatAssumeInjective(key1, key2) -``` - -Résultat: - -``` text -┌─concat(key1, key2)─┬─sum(value)─┐ -│ Hello, World! │ 3 │ -│ Hello, World! │ 2 │ -│ Hello, World │ 3 │ -└────────────────────┴────────────┘ -``` - -## substring(s, offset, longueur), mid(s, offset, longueur), substr(s, offset, longueur) {#substring} - -Renvoie une sous-chaîne commençant par l'octet du ‘offset’ index ‘length’ octets de long. L'indexation des caractères commence à partir d'un (comme dans SQL standard). Le ‘offset’ et ‘length’ les arguments doivent être des constantes. - -## substringUTF8(s, offset, longueur) {#substringutf8} - -Le même que ‘substring’, mais pour les points de code Unicode. Fonctionne sous l'hypothèse que la chaîne contient un ensemble d'octets représentant un texte codé en UTF-8. 
-
-## appendTrailingCharIfAbsent(s, c) {#appendtrailingcharifabsent}
-
-If the ‘s’ string is non-empty and does not contain the ‘c’ character at the end, it appends the ‘c’ character to the end.
-
-## convertCharset(s, from, to) {#convertcharset}
-
-Returns the string ‘s’ that was converted from the encoding in ‘from’ to the encoding in ‘to’.
-
-## base64Encode(s) {#base64encode}
-
-Encodes the ‘s’ string into base64.
-
-## base64Decode(s) {#base64decode}
-
-Decodes the base64-encoded string ‘s’ into the original string. In case of failure, raises an exception.
-
-## tryBase64Decode(s) {#trybase64decode}
-
-Similar to base64Decode, but in case of error an empty string is returned.
-
-## endsWith(s, suffix) {#endswith}
-
-Returns whether the string ends with the specified suffix. Returns 1 if the string ends with the specified suffix, otherwise it returns 0.
-
-## startsWith(str, prefix) {#startswith}
-
-Returns 1 if the string starts with the specified prefix, otherwise it returns 0.
-
-``` sql
-SELECT startsWith('Spider-Man', 'Spi');
-```
-
-**Returned values**
-
-- 1, if the string starts with the specified prefix.
-- 0, if the string does not start with the specified prefix.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT startsWith('Hello, world!', 'He');
-```
-
-Result:
-
-``` text
-┌─startsWith('Hello, world!', 'He')─┐
-│                                 1 │
-└───────────────────────────────────┘
-```
-
-## trim {#trim}
-
-Removes all specified characters from the start or end of a string.
-By default removes all consecutive occurrences of common whitespace (ASCII character 32) from both ends of a string.
-
-**Syntax**
-
-``` sql
-trim([[LEADING|TRAILING|BOTH] trim_character FROM] input_string)
-```
-
-**Parameters**
-
-- `trim_character` — specified characters for trim. [String](../../sql-reference/data-types/string.md).
-- `input_string` — string for trim. [String](../../sql-reference/data-types/string.md).
-
-**Returned value**
-
-A string without leading and (or) trailing specified characters.
-
-Type: `String`.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT trim(BOTH ' ()' FROM '(   Hello, world!   )')
-```
-
-Result:
-
-``` text
-┌─trim(BOTH ' ()' FROM '(   Hello, world!   )')─┐
-│ Hello, world!                                 │
-└───────────────────────────────────────────────┘
-```
-
-## trimLeft {#trimleft}
-
-Removes all consecutive occurrences of common whitespace (ASCII character 32) from the beginning of a string. It does not remove other kinds of whitespace characters (tab, no-break space, etc.).
-
-**Syntax**
-
-``` sql
-trimLeft(input_string)
-```
-
-Alias: `ltrim(input_string)`.
-
-**Parameters**
-
-- `input_string` — string to trim. [String](../../sql-reference/data-types/string.md).
-
-**Returned value**
-
-A string without leading common whitespaces.
-
-Type: `String`.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT trimLeft('     Hello, world!     ')
-```
-
-Result:
-
-``` text
-┌─trimLeft('     Hello, world!     ')─┐
-│ Hello, world!                       │
-└─────────────────────────────────────┘
-```
-
-## trimRight {#trimright}
-
-Removes all consecutive occurrences of common whitespace (ASCII character 32) from the end of a string. It does not remove other kinds of whitespace characters (tab, no-break space, etc.).
-
-**Syntax**
-
-``` sql
-trimRight(input_string)
-```
-
-Alias: `rtrim(input_string)`.
-
-**Parameters**
-
-- `input_string` — string to trim. [String](../../sql-reference/data-types/string.md).
-
-**Returned value**
-
-A string without trailing common whitespaces.
-
-Type: `String`.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT trimRight('     Hello, world!     ')
-```
-
-Result:
-
-``` text
-┌─trimRight('     Hello, world!     ')─┐
-│      Hello, world!                   │
-└──────────────────────────────────────┘
-```
-
-## trimBoth {#trimboth}
-
-Removes all consecutive occurrences of common whitespace (ASCII character 32) from both ends of a string. It does not remove other kinds of whitespace characters (tab, no-break space, etc.).
-
-**Syntax**
-
-``` sql
-trimBoth(input_string)
-```
-
-Alias: `trim(input_string)`.
-
-**Parameters**
-
-- `input_string` — string to trim. [String](../../sql-reference/data-types/string.md).
-
-**Returned value**
-
-A string without leading and trailing common whitespaces.
-
-Type: `String`.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT trimBoth('     Hello, world!     ')
-```
-
-Result:
-
-``` text
-┌─trimBoth('     Hello, world!     ')─┐
-│ Hello, world!                       │
-└─────────────────────────────────────┘
-```
-
-## CRC32(s) {#crc32}
-
-Returns the CRC32 checksum of a string, using the CRC-32-IEEE 802.3 polynomial and the initial value `0xffffffff` (zlib implementation).
-
-The result type is UInt32.
-
-## CRC32IEEE(s) {#crc32ieee}
-
-Returns the CRC32 checksum of a string, using the CRC-32-IEEE 802.3 polynomial.
-
-The result type is UInt32.
-
-## CRC64(s) {#crc64}
-
-Returns the CRC64 checksum of a string, using the CRC-64-ECMA polynomial.
-
-The result type is UInt64.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/string_functions/)
diff --git a/docs/fr/sql-reference/functions/string-replace-functions.md b/docs/fr/sql-reference/functions/string-replace-functions.md
deleted file mode 100644
index 5389a2bc927..00000000000
--- a/docs/fr/sql-reference/functions/string-replace-functions.md
+++ /dev/null
@@ -1,94 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 42
-toc_title: "Pour remplacer dans les cha\xEEnes"
----
-
-# Functions for searching and replacing in strings {#functions-for-searching-and-replacing-in-strings}
-
-## replaceOne(haystack, pattern, replacement) {#replaceonehaystack-pattern-replacement}
-
-Replaces the first occurrence, if it exists, of the ‘pattern’ substring in ‘haystack’ with the ‘replacement’ substring.
-Hereafter, ‘pattern’ and ‘replacement’ must be constants.
-
-## replaceAll(haystack, pattern, replacement), replace(haystack, pattern, replacement) {#replaceallhaystack-pattern-replacement-replacehaystack-pattern-replacement}
-
-Replaces all occurrences of the ‘pattern’ substring in ‘haystack’ with the ‘replacement’ substring.
-
-## replaceRegexpOne(haystack, pattern, replacement) {#replaceregexponehaystack-pattern-replacement}
-
-Replacement using the ‘pattern’ regular expression. A re2 regular expression.
-Replaces only the first occurrence, if it exists.
-A pattern can be specified as ‘replacement’. This pattern can include substitutions `\0-\9`.
-The substitution `\0` includes the entire regular expression. Substitutions `\1-\9` correspond to the subpattern numbers. To use the `\` character in a pattern, escape it using `\`.
-Also keep in mind that a string literal requires an extra escape.
-
-Example 1. Converting the date to American format:
-
-``` sql
-SELECT DISTINCT
-    EventDate,
-    replaceRegexpOne(toString(EventDate), '(\\d{4})-(\\d{2})-(\\d{2})', '\\2/\\3/\\1') AS res
-FROM test.hits
-LIMIT 7
-FORMAT TabSeparated
-```
-
-``` text
-2014-03-17      03/17/2014
-2014-03-18      03/18/2014
-2014-03-19      03/19/2014
-2014-03-20      03/20/2014
-2014-03-21      03/21/2014
-2014-03-22      03/22/2014
-2014-03-23      03/23/2014
-```
-
-Example 2. Copying a string ten times:
-
-``` sql
-SELECT replaceRegexpOne('Hello, World!', '.*', '\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0') AS res
-```
-
-``` text
-┌─res────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
-│ Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World! │
-└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
-```
-
-## replaceRegexpAll(haystack, pattern, replacement) {#replaceregexpallhaystack-pattern-replacement}
-
-This does the same thing, but replaces all the occurrences. Example:
-
-``` sql
-SELECT replaceRegexpAll('Hello, World!', '.', '\\0\\0') AS res
-```
-
-``` text
-┌─res────────────────────────┐
-│ HHeelllloo,,  WWoorrlldd!! │
-└────────────────────────────┘
-```
-
-As an exception, if a regular expression worked on an empty substring, the replacement is not made more than once.
-Example:
-
-``` sql
-SELECT replaceRegexpAll('Hello, World!', '^', 'here: ') AS res
-```
-
-``` text
-┌─res─────────────────┐
-│ here: Hello, World! │
-└─────────────────────┘
-```
-
-## regexpQuoteMeta(s) {#regexpquotemetas}
-
-The function adds a backslash before some predefined characters in the string.
-Predefined characters: ‘0’, ‘\\’, ‘\|’, ‘(’, ‘)’, ‘^’, ‘$’, ‘.’, ‘\[’, ‘\]’, ‘?’, ‘\*’, ‘+’, ‘{’, ‘:’, ‘-’.
-This implementation slightly differs from re2::RE2::QuoteMeta. It escapes the zero byte as \\0 instead of 00, and it escapes only the required characters.
-For more information, see the link: [RE2](https://github.com/google/re2/blob/master/re2/re2.cc#L473)
-
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/string_replace_functions/)
diff --git a/docs/fr/sql-reference/functions/string-search-functions.md b/docs/fr/sql-reference/functions/string-search-functions.md
deleted file mode 100644
index 20217edd32c..00000000000
--- a/docs/fr/sql-reference/functions/string-search-functions.md
+++ /dev/null
@@ -1,379 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 41
-toc_title: "Pour Rechercher Des Cha\xEEnes"
----
-
-# Functions for searching strings {#functions-for-searching-strings}
-
-The search is case-sensitive by default in all these functions. There are separate variants for case-insensitive search.
-
-## position(haystack, needle), locate(haystack, needle) {#position}
-
-Returns the position (in bytes) of the found substring in the string, starting from 1.
-
-Works under the assumption that the string contains a set of bytes representing a single-byte encoded text. If this assumption is not met and a character can't be represented using a single byte, the function doesn't throw an exception and returns some unexpected result.
-If a character can be represented using two bytes, it will use two bytes, and so on.
-
-For a case-insensitive search, use the function [positionCaseInsensitive](#positioncaseinsensitive).
-
-**Syntax**
-
-``` sql
-position(haystack, needle)
-```
-
-Alias: `locate(haystack, needle)`.
-
-**Parameters**
-
-- `haystack` — string in which the substring will be searched. [String](../syntax.md#syntax-string-literal).
-- `needle` — substring to be searched. [String](../syntax.md#syntax-string-literal).
-
-**Returned values**
-
-- Starting position in bytes (counting from 1), if the substring was found.
-- 0, if the substring was not found.
-
-Type: `Integer`.
-
-**Example**
-
-The phrase “Hello, world!” contains a set of bytes representing a single-byte encoded text. The function returns the expected result:
-
-Query:
-
-``` sql
-SELECT position('Hello, world!', '!')
-```
-
-Result:
-
-``` text
-┌─position('Hello, world!', '!')─┐
-│                             13 │
-└────────────────────────────────┘
-```
-
-The same phrase in Russian contains characters which can't be represented using a single byte. The function returns some unexpected result (use the [positionUTF8](#positionutf8) function for multi-byte encoded text):
-
-Query:
-
-``` sql
-SELECT position('Привет, мир!', '!')
-```
-
-Result:
-
-``` text
-┌─position('Привет, мир!', '!')─┐
-│                            21 │
-└───────────────────────────────┘
-```
-
-## positionCaseInsensitive {#positioncaseinsensitive}
-
-The same as [position](#position), returns the position (in bytes) of the found substring in the string, starting from 1. Use the function for a case-insensitive search.
-
-Works under the assumption that the string contains a set of bytes representing a single-byte encoded text. If this assumption is not met and a character can't be represented using a single byte, the function doesn't throw an exception and returns some unexpected result. If a character can be represented using two bytes, it will use two bytes, and so on.
-
-**Syntax**
-
-``` sql
-positionCaseInsensitive(haystack, needle)
-```
-
-**Parameters**
-
-- `haystack` — string in which the substring will be searched. [String](../syntax.md#syntax-string-literal).
-- `needle` — substring to be searched. [String](../syntax.md#syntax-string-literal).
-
-**Returned values**
-
-- Starting position in bytes (counting from 1), if the substring was found.
-- 0, if the substring was not found.
-
-Type: `Integer`.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT positionCaseInsensitive('Hello, world!', 'hello')
-```
-
-Result:
-
-``` text
-┌─positionCaseInsensitive('Hello, world!', 'hello')─┐
-│                                                 1 │
-└───────────────────────────────────────────────────┘
-```
-
-## positionUTF8 {#positionutf8}
-
-Returns the position (in Unicode points) of the found substring in the string, starting from 1.
-
-Works under the assumption that the string contains a set of bytes representing a UTF-8 encoded text. If this assumption is not met, the function doesn't throw an exception and returns some unexpected result. If a character can be represented using two Unicode points, it will use two, and so on.
-
-For a case-insensitive search, use the function [positionCaseInsensitiveUTF8](#positioncaseinsensitiveutf8).
-
-**Syntax**
-
-``` sql
-positionUTF8(haystack, needle)
-```
-
-**Parameters**
-
-- `haystack` — string in which the substring will be searched. [String](../syntax.md#syntax-string-literal).
-- `needle` — substring to be searched. [String](../syntax.md#syntax-string-literal).
-
-**Returned values**
-
-- Starting position in Unicode points (counting from 1), if the substring was found.
-- 0, if the substring was not found.
-
-Type: `Integer`.
-
-**Example**
-
-The phrase “Hello, world!” in Russian contains a set of Unicode points representing a single-point encoded text. The function returns the expected result:
-
-Query:
-
-``` sql
-SELECT positionUTF8('Привет, мир!', '!')
-```
-
-Result:
-
-``` text
-┌─positionUTF8('Привет, мир!', '!')─┐
-│                                12 │
-└───────────────────────────────────┘
-```
-
-For the phrase “Salut, étudiante!”, where the character `é` can be represented using one point (`U+00E9`) or two points (`U+0065U+0301`), the function can return an unexpected result:
-
-Query for the letter `é`, which is represented as one Unicode point `U+00E9`:
-
-``` sql
-SELECT positionUTF8('Salut, étudiante!', '!')
-```
-
-Result:
-
-``` text
-┌─positionUTF8('Salut, étudiante!', '!')─┐
-│                                     17 │
-└────────────────────────────────────────┘
-```
-
-Query for the letter `é`, which is represented as two Unicode points `U+0065U+0301`:
-
-``` sql
-SELECT positionUTF8('Salut, étudiante!', '!')
-```
-
-Result:
-
-``` text
-┌─positionUTF8('Salut, étudiante!', '!')─┐
-│                                     18 │
-└────────────────────────────────────────┘
-```
-
-## positionCaseInsensitiveUTF8 {#positioncaseinsensitiveutf8}
-
-The same as [positionUTF8](#positionutf8), but is case-insensitive. Returns the position (in Unicode points) of the found substring in the string, starting from 1.
-
-Works under the assumption that the string contains a set of bytes representing a UTF-8 encoded text. If this assumption is not met, the function doesn't throw an exception and returns some unexpected result. If a character can be represented using two Unicode points, it will use two, and so on.
-
-**Syntax**
-
-``` sql
-positionCaseInsensitiveUTF8(haystack, needle)
-```
-
-**Parameters**
-
-- `haystack` — string in which the substring will be searched. [String](../syntax.md#syntax-string-literal).
-- `needle` — substring to be searched. [String](../syntax.md#syntax-string-literal).
-
-**Returned value**
-
-- Starting position in Unicode points (counting from 1), if the substring was found.
-- 0, if the substring was not found.
-
-Type: `Integer`.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT positionCaseInsensitiveUTF8('Привет, мир!', 'Мир')
-```
-
-Result:
-
-``` text
-┌─positionCaseInsensitiveUTF8('Привет, мир!', 'Мир')─┐
-│                                                  9 │
-└────────────────────────────────────────────────────┘
-```
-
-## multiSearchAllPositions {#multisearchallpositions}
-
-The same as [position](string-search-functions.md#position), but returns an `Array` of positions (in bytes) of the found corresponding substrings in the string. Positions are indexed starting from 1.
-
-The search is performed on sequences of bytes without respect to string encoding and collation.
-
-- For case-insensitive ASCII search, use the function `multiSearchAllPositionsCaseInsensitive`.
-- For search in UTF-8, use the function [multiSearchAllPositionsUTF8](#multiSearchAllPositionsUTF8); an example is shown after its description below.
-- For case-insensitive UTF-8 search, use the function multiSearchAllPositionsCaseInsensitiveUTF8.
-
-**Syntax**
-
-``` sql
-multiSearchAllPositions(haystack, [needle1, needle2, ..., needlen])
-```
-
-**Parameters**
-
-- `haystack` — string in which the substrings will be searched. [String](../syntax.md#syntax-string-literal).
-- `needle` — substring to be searched. [String](../syntax.md#syntax-string-literal).
-
-**Returned values**
-
-- Array of starting positions in bytes (counting from 1), if the corresponding substring was found, and 0 if it was not found.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT multiSearchAllPositions('Hello, World!', ['hello', '!', 'world'])
-```
-
-Result:
-
-``` text
-┌─multiSearchAllPositions('Hello, World!', ['hello', '!', 'world'])─┐
-│ [0,13,0]                                                          │
-└───────────────────────────────────────────────────────────────────┘
-```
-
-## multiSearchAllPositionsUTF8 {#multiSearchAllPositionsUTF8}
-
-See `multiSearchAllPositions`.
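-
-A minimal illustration of the UTF-8 variant (hypothetical query and output, not part of the original reference): positions are counted in Unicode code points rather than bytes.
-
-``` sql
-SELECT multiSearchAllPositionsUTF8('Привет, мир!', ['мир', '!'])
-```
-
-``` text
-┌─multiSearchAllPositionsUTF8('Привет, мир!', ['мир', '!'])─┐
-│ [9,12]                                                    │
-└───────────────────────────────────────────────────────────┘
-```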
-
-## multiSearchFirstPosition(haystack, \[needle1, needle2, …, needlen\]) {#multisearchfirstposition}
-
-The same as `position`, but returns the leftmost offset in the string `haystack` that matches any of the needles.
-
-For a case-insensitive search or/and in UTF-8 format, use the functions `multiSearchFirstPositionCaseInsensitive, multiSearchFirstPositionUTF8, multiSearchFirstPositionCaseInsensitiveUTF8`.
-
-## multiSearchFirstIndex(haystack, \[needle1, needle2, …, needlen\]) {#multisearchfirstindexhaystack-needle1-needle2-needlen}
-
-Returns the index `i` (starting from 1) of the leftmost found needle in the string `haystack`, and 0 otherwise.
-
-For a case-insensitive search or/and in UTF-8 format, use the functions `multiSearchFirstIndexCaseInsensitive, multiSearchFirstIndexUTF8, multiSearchFirstIndexCaseInsensitiveUTF8`.
-
-## multiSearchAny(haystack, \[needle1, needle2, …, needlen\]) {#function-multisearchany}
-
-Returns 1 if at least one of the needle strings matches the string `haystack`, and 0 otherwise.
-
-For a case-insensitive search or/and in UTF-8 format, use the functions `multiSearchAnyCaseInsensitive, multiSearchAnyUTF8, multiSearchAnyCaseInsensitiveUTF8`.
-
-!!! note "Note"
-    In all `multiSearch*` functions the number of needles should be less than 2⁸ because of the implementation specification.
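-
-A short illustration of the index/any variants (hypothetical query and output, not part of the original reference):
-
-``` sql
-SELECT
-    multiSearchFirstIndex('Hello, World!', ['foo', 'World']) AS first_index,
-    multiSearchAny('Hello, World!', ['foo', 'bar']) AS any_match
-```
-
-``` text
-┌─first_index─┬─any_match─┐
-│           2 │         0 │
-└─────────────┴───────────┘
-```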
-
-## match(haystack, pattern) {#matchhaystack-pattern}
-
-Checks whether the string matches the `pattern` regular expression. A `re2` regular expression. The [syntax](https://github.com/google/re2/wiki/Syntax) of the `re2` regular expressions is more limited than the syntax of Perl regular expressions.
-
-Returns 0 if it doesn't match, or 1 if it matches.
-
-Note that the backslash symbol (`\`) is used for escaping in the regular expression. The same symbol is used for escaping in string literals. So in order to escape the symbol in a regular expression, you must write two backslashes (\\) in a string literal.
-
-The regular expression works with the string as if it were a set of bytes. The regular expression can't contain null bytes.
-For patterns to search for substrings in a string, it is better to use LIKE or ‘position’, since they work much faster.
-
-## multiMatchAny(haystack, \[pattern1, pattern2, …, patternn\]) {#multimatchanyhaystack-pattern1-pattern2-patternn}
-
-The same as `match`, but returns 0 if none of the regular expressions are matched and 1 if any of the patterns matches. It uses the [hyperscan](https://github.com/intel/hyperscan) library. For patterns to search for substrings in a string, it is better to use `multiSearchAny` since it works much faster. A sketch of a query is shown after this list of functions.
-
-!!! note "Note"
-    The length of any of the `haystack` strings must be less than 2³² bytes, otherwise an exception is thrown. This restriction takes place because of the hyperscan API.
-
-## multiMatchAnyIndex(haystack, \[pattern1, pattern2, …, patternn\]) {#multimatchanyindexhaystack-pattern1-pattern2-patternn}
-
-The same as `multiMatchAny`, but returns any index that matches the haystack.
-
-## multiMatchAllIndices(haystack, \[pattern1, pattern2, …, patternn\]) {#multimatchallindiceshaystack-pattern1-pattern2-patternn}
-
-The same as `multiMatchAny`, but returns the array of all indices that match the haystack, in any order.
-
-## multiFuzzyMatchAny(haystack, distance, \[pattern1, pattern2, …, patternn\]) {#multifuzzymatchanyhaystack-distance-pattern1-pattern2-patternn}
-
-The same as `multiMatchAny`, but returns 1 if any pattern matches the haystack within a constant [edit distance](https://en.wikipedia.org/wiki/Edit_distance). This function is also in an experimental mode and can be extremely slow. For more information, see the [hyperscan documentation](https://intel.github.io/hyperscan/dev-reference/compilation.html#approximate-matching).
-
-## multiFuzzyMatchAnyIndex(haystack, distance, \[pattern1, pattern2, …, patternn\]) {#multifuzzymatchanyindexhaystack-distance-pattern1-pattern2-patternn}
-
-The same as `multiFuzzyMatchAny`, but returns any index that matches the haystack within a constant edit distance.
-
-## multiFuzzyMatchAllIndices(haystack, distance, \[pattern1, pattern2, …, patternn\]) {#multifuzzymatchallindiceshaystack-distance-pattern1-pattern2-patternn}
-
-The same as `multiFuzzyMatchAny`, but returns the array of all indices, in any order, that match the haystack within a constant edit distance.
-
-!!! note "Note"
-    `multiFuzzyMatch*` functions do not support UTF-8 regular expressions, and such expressions are treated as bytes because of the hyperscan restriction.
-
-!!! note "Note"
-    To turn off all functions that use hyperscan, use the setting `SET allow_hyperscan = 0;`.
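-
-A minimal sketch of `multiMatchAny` (hypothetical query and output, not part of the original reference; requires a server built with hyperscan enabled):
-
-``` sql
-SELECT multiMatchAny('Hello, World!', ['^\\d+$', 'Wor.d'])
-```
-
-``` text
-┌─multiMatchAny('Hello, World!', ['^\\d+$', 'Wor.d'])─┐
-│                                                   1 │
-└─────────────────────────────────────────────────────┘
-```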
-
-## extract(haystack, pattern) {#extracthaystack-pattern}
-
-Extracts a fragment of a string using a regular expression. If ‘haystack’ doesn't match the ‘pattern’ regex, an empty string is returned. If the regex doesn't contain subpatterns, it takes the fragment that matches the entire regex. Otherwise, it takes the fragment that matches the first subpattern.
-
-## extractAll(haystack, pattern) {#extractallhaystack-pattern}
-
-Extracts all the fragments of a string using a regular expression. If ‘haystack’ doesn't match the ‘pattern’ regex, an empty string is returned. Returns an array of strings consisting of all matches to the regex. In general, the behavior is the same as the ‘extract’ function (it takes the first subpattern, or the entire expression if there isn't a subpattern). See the sketch after this section.
-
-## like(haystack, pattern), haystack LIKE pattern operator {#function-like}
-
-Checks whether a string matches a simple regular expression.
-The regular expression can contain the metasymbols `%` and `_`.
-
-`%` indicates any quantity of any bytes (including zero characters).
-
-`_` indicates any single byte.
-
-Use the backslash (`\`) for escaping metasymbols. See the note on escaping in the description of the ‘match’ function.
-
-For regular expressions like `%needle%`, the code is more optimal and works as fast as the `position` function.
-For other regular expressions, the code is the same as for the ‘match’ function.
-
-## notLike(haystack, pattern), haystack NOT LIKE pattern operator {#function-notlike}
-
-The same thing as ‘like’, but negative.
-
-## ngramDistance(haystack, needle) {#ngramdistancehaystack-needle}
-
-Calculates the 4-gram distance between `haystack` and `needle`: counts the symmetric difference between two multisets of 4-grams and normalizes it by the sum of their cardinalities. Returns a float number from 0 to 1 – the closer to zero, the more the strings are similar to each other. If the constant `needle` or `haystack` is more than 32Kb, throws an exception. If any of the non-constant `haystack` or `needle` strings is more than 32Kb, the distance is always one.
-
-For a case-insensitive search or/and in UTF-8 format, use the functions `ngramDistanceCaseInsensitive, ngramDistanceUTF8, ngramDistanceCaseInsensitiveUTF8`.
-
-## ngramSearch(haystack, needle) {#ngramsearchhaystack-needle}
-
-The same as `ngramDistance`, but calculates the non-symmetric difference between `needle` and `haystack` – the number of n-grams from the needle minus the common number of n-grams, normalized by the number of `needle` n-grams. The closer to one, the more likely `needle` is in the `haystack`. Can be useful for fuzzy string search.
-
-For a case-insensitive search or/and in UTF-8 format, use the functions `ngramSearchCaseInsensitive, ngramSearchUTF8, ngramSearchCaseInsensitiveUTF8`.
-
-!!! note "Note"
-    For the UTF-8 case we use the 3-gram distance. All these are not perfectly fair n-gram distances. We use 2-byte hashes to hash n-grams and then calculate the (non-)symmetric difference between these hash tables – collisions may occur. With the UTF-8 case-insensitive format we do not use a fair `tolower` function – we zero the 5-th bit (starting from zero) of each codepoint byte and the first bit of the zeroth byte if there is more than one byte – this works for Latin and mostly for all Cyrillic letters.
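-
-A short illustration of `extract` vs `extractAll` (hypothetical query and output, not part of the original reference):
-
-``` sql
-SELECT
-    extract('key1=1, key2=2', '\\d+') AS first_match,
-    extractAll('key1=1, key2=2', '\\d+') AS all_matches
-```
-
-``` text
-┌─first_match─┬─all_matches───────┐
-│ 1           │ ['1','1','2','2'] │
-└─────────────┴───────────────────┘
-```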
- -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/string_search_functions/) diff --git a/docs/fr/sql-reference/functions/type-conversion-functions.md b/docs/fr/sql-reference/functions/type-conversion-functions.md deleted file mode 100644 index c17b24c69dc..00000000000 --- a/docs/fr/sql-reference/functions/type-conversion-functions.md +++ /dev/null @@ -1,534 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 38 -toc_title: La Conversion De Type ---- - -# Fonctions De Conversion De Type {#type-conversion-functions} - -## Problèmes courants des Conversions numériques {#numeric-conversion-issues} - -Lorsque vous convertissez une valeur d'un type de données à un autre, vous devez vous rappeler que dans le cas courant, il s'agit d'une opération dangereuse qui peut entraîner une perte de données. Une perte de données peut se produire si vous essayez d'ajuster la valeur d'un type de données plus grand à un type de données plus petit, ou si vous convertissez des valeurs entre différents types de données. - -ClickHouse a le [même comportement que les programmes C++ ](https://en.cppreference.com/w/cpp/language/implicit_conversion). - -## toInt (8/16/32/64) {#toint8163264} - -Convertit une valeur d'entrée en [Int](../../sql-reference/data-types/int-uint.md) type de données. Cette fonction comprend: - -- `toInt8(expr)` — Results in the `Int8` type de données. -- `toInt16(expr)` — Results in the `Int16` type de données. -- `toInt32(expr)` — Results in the `Int32` type de données. -- `toInt64(expr)` — Results in the `Int64` type de données. - -**Paramètre** - -- `expr` — [Expression](../syntax.md#syntax-expressions) renvoyer un nombre ou une chaîne avec la représentation décimale d'un nombre. Les représentations binaires, octales et hexadécimales des nombres ne sont pas prises en charge. Les zéros principaux sont dépouillés. - -**Valeur renvoyée** - -Valeur entière dans le `Int8`, `Int16`, `Int32`, ou `Int64` type de données. - -Fonctions d'utilisation [l'arrondi vers zéro](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), ce qui signifie qu'ils tronquent des chiffres fractionnaires de nombres. - -Le comportement des fonctions pour le [NaN et Inf](../../sql-reference/data-types/float.md#data_type-float-nan-inf) arguments est indéfini. Rappelez-vous sur [problèmes de conversion numérique](#numeric-conversion-issues), lorsque vous utilisez les fonctions. - -**Exemple** - -``` sql -SELECT toInt64(nan), toInt32(32), toInt16('16'), toInt8(8.8) -``` - -``` text -┌─────────toInt64(nan)─┬─toInt32(32)─┬─toInt16('16')─┬─toInt8(8.8)─┐ -│ -9223372036854775808 │ 32 │ 16 │ 8 │ -└──────────────────────┴─────────────┴───────────────┴─────────────┘ -``` - -## toInt (8/16/32/64)OrZero {#toint8163264orzero} - -Il prend un argument de type String et essaie de l'analyser en Int (8 \| 16 \| 32 \| 64). En cas d'échec, renvoie 0. - -**Exemple** - -``` sql -select toInt64OrZero('123123'), toInt8OrZero('123qwe123') -``` - -``` text -┌─toInt64OrZero('123123')─┬─toInt8OrZero('123qwe123')─┐ -│ 123123 │ 0 │ -└─────────────────────────┴───────────────────────────┘ -``` - -## toInt (8/16/32/64)OrNull {#toint8163264ornull} - -Il prend un argument de type String et essaie de l'analyser en Int (8 \| 16 \| 32 \| 64). En cas d'échec, renvoie NULL. 
- -**Exemple** - -``` sql -select toInt64OrNull('123123'), toInt8OrNull('123qwe123') -``` - -``` text -┌─toInt64OrNull('123123')─┬─toInt8OrNull('123qwe123')─┐ -│ 123123 │ ᴺᵁᴸᴸ │ -└─────────────────────────┴───────────────────────────┘ -``` - -## toUInt (8/16/32/64) {#touint8163264} - -Convertit une valeur d'entrée en [UInt](../../sql-reference/data-types/int-uint.md) type de données. Cette fonction comprend: - -- `toUInt8(expr)` — Results in the `UInt8` type de données. -- `toUInt16(expr)` — Results in the `UInt16` type de données. -- `toUInt32(expr)` — Results in the `UInt32` type de données. -- `toUInt64(expr)` — Results in the `UInt64` type de données. - -**Paramètre** - -- `expr` — [Expression](../syntax.md#syntax-expressions) renvoyer un nombre ou une chaîne avec la représentation décimale d'un nombre. Les représentations binaires, octales et hexadécimales des nombres ne sont pas prises en charge. Les zéros principaux sont dépouillés. - -**Valeur renvoyée** - -Valeur entière dans le `UInt8`, `UInt16`, `UInt32`, ou `UInt64` type de données. - -Fonctions d'utilisation [l'arrondi vers zéro](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), ce qui signifie qu'ils tronquent des chiffres fractionnaires de nombres. - -Le comportement des fonctions pour les agruments négatifs et pour le [NaN et Inf](../../sql-reference/data-types/float.md#data_type-float-nan-inf) arguments est indéfini. Si vous passez une chaîne avec un nombre négatif, par exemple `'-32'`, ClickHouse soulève une exception. Rappelez-vous sur [problèmes de conversion numérique](#numeric-conversion-issues), lorsque vous utilisez les fonctions. - -**Exemple** - -``` sql -SELECT toUInt64(nan), toUInt32(-32), toUInt16('16'), toUInt8(8.8) -``` - -``` text -┌───────toUInt64(nan)─┬─toUInt32(-32)─┬─toUInt16('16')─┬─toUInt8(8.8)─┐ -│ 9223372036854775808 │ 4294967264 │ 16 │ 8 │ -└─────────────────────┴───────────────┴────────────────┴──────────────┘ -``` - -## toUInt (8/16/32/64)OrZero {#touint8163264orzero} - -## toUInt (8/16/32/64)OrNull {#touint8163264ornull} - -## toFloat (32/64) {#tofloat3264} - -## toFloat (32/64)OrZero {#tofloat3264orzero} - -## toFloat (32/64) OrNull {#tofloat3264ornull} - -## jour {#todate} - -## toDateOrZero {#todateorzero} - -## toDateOrNull {#todateornull} - -## toDateTime {#todatetime} - -## toDateTimeOrZero {#todatetimeorzero} - -## toDateTimeOrNull {#todatetimeornull} - -## toDecimal (32/64/128) {#todecimal3264128} - -Convertir `value` à l' [Décimal](../../sql-reference/data-types/decimal.md) type de données avec précision de `S`. Le `value` peut être un nombre ou une chaîne. Le `S` (l'échelle) paramètre spécifie le nombre de décimales. - -- `toDecimal32(value, S)` -- `toDecimal64(value, S)` -- `toDecimal128(value, S)` - -## toDecimal (32/64/128) OrNull {#todecimal3264128ornull} - -Convertit une chaîne d'entrée en [Nullable (Décimal (P, S))](../../sql-reference/data-types/decimal.md) valeur de type de données. Cette famille de fonctions comprennent: - -- `toDecimal32OrNull(expr, S)` — Results in `Nullable(Decimal32(S))` type de données. -- `toDecimal64OrNull(expr, S)` — Results in `Nullable(Decimal64(S))` type de données. -- `toDecimal128OrNull(expr, S)` — Results in `Nullable(Decimal128(S))` type de données. - -Ces fonctions devraient être utilisées à la place de `toDecimal*()` fonctions, si vous préférez obtenir un `NULL` la valeur au lieu d'une exception dans le cas d'une valeur d'entrée erreur d'analyse. 
- -**Paramètre** - -- `expr` — [Expression](../syntax.md#syntax-expressions), retourne une valeur dans l' [Chaîne](../../sql-reference/data-types/string.md) type de données. ClickHouse attend la représentation textuelle du nombre décimal. Exemple, `'1.111'`. -- `S` — Scale, the number of decimal places in the resulting value. - -**Valeur renvoyée** - -Une valeur dans l' `Nullable(Decimal(P,S))` type de données. La valeur contient: - -- Numéro `S` décimales, si ClickHouse interprète la chaîne d'entrée comme un nombre. -- `NULL` si ClickHouse ne peut pas interpréter la chaîne d'entrée comme un nombre ou si le nombre d'entrée contient plus de `S` décimale. - -**Exemple** - -``` sql -SELECT toDecimal32OrNull(toString(-1.111), 5) AS val, toTypeName(val) -``` - -``` text -┌──────val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 5))─┐ -│ -1.11100 │ Nullable(Decimal(9, 5)) │ -└──────────┴────────────────────────────────────────────────────┘ -``` - -``` sql -SELECT toDecimal32OrNull(toString(-1.111), 2) AS val, toTypeName(val) -``` - -``` text -┌──val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 2))─┐ -│ ᴺᵁᴸᴸ │ Nullable(Decimal(9, 2)) │ -└──────┴────────────────────────────────────────────────────┘ -``` - -## toDecimal (32/64/128)OrZero {#todecimal3264128orzero} - -Convertit une valeur d'entrée en [Decimal(P,S)](../../sql-reference/data-types/decimal.md) type de données. Cette famille de fonctions comprennent: - -- `toDecimal32OrZero( expr, S)` — Results in `Decimal32(S)` type de données. -- `toDecimal64OrZero( expr, S)` — Results in `Decimal64(S)` type de données. -- `toDecimal128OrZero( expr, S)` — Results in `Decimal128(S)` type de données. - -Ces fonctions devraient être utilisées à la place de `toDecimal*()` fonctions, si vous préférez obtenir un `0` la valeur au lieu d'une exception dans le cas d'une valeur d'entrée erreur d'analyse. - -**Paramètre** - -- `expr` — [Expression](../syntax.md#syntax-expressions), retourne une valeur dans l' [Chaîne](../../sql-reference/data-types/string.md) type de données. ClickHouse attend la représentation textuelle du nombre décimal. Exemple, `'1.111'`. -- `S` — Scale, the number of decimal places in the resulting value. - -**Valeur renvoyée** - -Une valeur dans l' `Nullable(Decimal(P,S))` type de données. La valeur contient: - -- Numéro `S` décimales, si ClickHouse interprète la chaîne d'entrée comme un nombre. -- 0 avec `S` décimales, si ClickHouse ne peut pas interpréter la chaîne d'entrée comme un nombre ou si le nombre d'entrée contient plus de `S` décimale. - -**Exemple** - -``` sql -SELECT toDecimal32OrZero(toString(-1.111), 5) AS val, toTypeName(val) -``` - -``` text -┌──────val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 5))─┐ -│ -1.11100 │ Decimal(9, 5) │ -└──────────┴────────────────────────────────────────────────────┘ -``` - -``` sql -SELECT toDecimal32OrZero(toString(-1.111), 2) AS val, toTypeName(val) -``` - -``` text -┌──val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 2))─┐ -│ 0.00 │ Decimal(9, 2) │ -└──────┴────────────────────────────────────────────────────┘ -``` - -## toString {#tostring} - -Fonctions de conversion entre des nombres, des chaînes (mais pas des chaînes fixes), des dates et des dates avec des heures. -Toutes ces fonctions acceptent un argument. - -Lors de la conversion vers ou à partir d'une chaîne, la valeur est formatée ou analysée en utilisant les mêmes règles que pour le format TabSeparated (et presque tous les autres formats de texte). 
Si la chaîne ne peut pas être analysée, une exception est levée et la demande est annulée. - -Lors de la conversion de dates en nombres ou vice versa, la date correspond au nombre de jours depuis le début de L'époque Unix. -Lors de la conversion de dates avec des heures en nombres ou vice versa, la date avec l'heure correspond au nombre de secondes depuis le début de L'époque Unix. - -Les formats date et date-avec-heure pour les fonctions toDate/toDateTime sont définis comme suit: - -``` text -YYYY-MM-DD -YYYY-MM-DD hh:mm:ss -``` - -À titre d'exception, si vous convertissez des types numériques UInt32, Int32, UInt64 ou Int64 à Date, et si le nombre est supérieur ou égal à 65536, le nombre est interprété comme un horodatage Unix (et non comme le nombre de jours) et est arrondi à la date. Cela permet de prendre en charge l'occurrence commune de l'écriture ‘toDate(unix_timestamp)’, qui autrement serait une erreur et nécessiterait d'écrire le plus lourd ‘toDate(toDateTime(unix_timestamp))’. - -La Conversion entre une date et une date avec l'heure est effectuée de manière naturelle: en ajoutant une heure nulle ou en supprimant l'heure. - -La Conversion entre types numériques utilise les mêmes règles que les affectations entre différents types numériques en C++. - -De plus, la fonction ToString de L'argument DateTime peut prendre un deuxième argument de chaîne contenant le nom du fuseau horaire. Exemple: `Asia/Yekaterinburg` Dans ce cas, l'heure est formatée en fonction du fuseau horaire spécifié. - -``` sql -SELECT - now() AS now_local, - toString(now(), 'Asia/Yekaterinburg') AS now_yekat -``` - -``` text -┌───────────now_local─┬─now_yekat───────────┐ -│ 2016-06-15 00:11:21 │ 2016-06-15 02:11:21 │ -└─────────────────────┴─────────────────────┘ -``` - -Voir aussi l' `toUnixTimestamp` fonction. - -## toFixedString (s, N) {#tofixedstrings-n} - -Convertit un argument de type String en un type FixedString (N) (une chaîne de longueur fixe N). N doit être une constante. -Si la chaîne a moins d'octets que N, elle est complétée avec des octets null à droite. Si la chaîne a plus d'octets que N, une exception est levée. - -## toStringCutToZero(s) {#tostringcuttozeros} - -Accepte un argument String ou FixedString. Renvoie la chaîne avec le contenu tronqué au premier octet zéro trouvé. - -Exemple: - -``` sql -SELECT toFixedString('foo', 8) AS s, toStringCutToZero(s) AS s_cut -``` - -``` text -┌─s─────────────┬─s_cut─┐ -│ foo\0\0\0\0\0 │ foo │ -└───────────────┴───────┘ -``` - -``` sql -SELECT toFixedString('foo\0bar', 8) AS s, toStringCutToZero(s) AS s_cut -``` - -``` text -┌─s──────────┬─s_cut─┐ -│ foo\0bar\0 │ foo │ -└────────────┴───────┘ -``` - -## reinterpretAsUInt (8/16/32/64) {#reinterpretasuint8163264} - -## reinterpretAsInt (8/16/32/64) {#reinterpretasint8163264} - -## reinterpretAsFloat (32/64) {#reinterpretasfloat3264} - -## réinterprétasdate {#reinterpretasdate} - -## reinterpretAsDateTime {#reinterpretasdatetime} - -Ces fonctions acceptent une chaîne et interprètent les octets placés au début de la chaîne comme un nombre dans l'ordre de l'hôte (little endian). Si la chaîne n'est pas assez longue, les fonctions fonctionnent comme si la chaîne était remplie avec le nombre nécessaire d'octets nuls. Si la chaîne est plus longue que nécessaire, les octets supplémentaires sont ignorés. Une date est interprétée comme le nombre de jours depuis le début de l'Époque Unix, et une date avec le temps, est interprété comme le nombre de secondes écoulées depuis le début de l'Époque Unix. 
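-
-A minimal illustration of these reinterpretation functions (hypothetical query and output, not part of the original reference; assumes a little-endian host, so `'AB'` is read as the bytes `0x41`, `0x42` in host order, i.e. `0x4241`):
-
-``` sql
-SELECT reinterpretAsUInt8('A') AS byte_value, reinterpretAsUInt16('AB') AS two_bytes
-```
-
-``` text
-┌─byte_value─┬─two_bytes─┐
-│         65 │     16961 │
-└────────────┴───────────┘
-```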
- -## reinterpretAsString {#type_conversion_functions-reinterpretAsString} - -Cette fonction accepte un nombre ou une date ou une date avec l'heure, et renvoie une chaîne contenant des octets représentant la valeur correspondante dans l'ordre de l'hôte (little endian). Les octets nuls sont supprimés de la fin. Par exemple, une valeur de type uint32 de 255 est une chaîne longue d'un octet. - -## reinterpretAsFixedString {#reinterpretasfixedstring} - -Cette fonction accepte un nombre ou une date ou une date avec l'heure, et renvoie une chaîne fixe contenant des octets représentant la valeur correspondante dans l'ordre de l'hôte (little endian). Les octets nuls sont supprimés de la fin. Par exemple, une valeur de type uint32 de 255 est une chaîne fixe longue d'un octet. - -## CAST (x, T) {#type_conversion_function-cast} - -Convertir ‘x’ à l' ‘t’ type de données. La syntaxe CAST (X comme t) est également prise en charge. - -Exemple: - -``` sql -SELECT - '2016-06-15 23:00:00' AS timestamp, - CAST(timestamp AS DateTime) AS datetime, - CAST(timestamp AS Date) AS date, - CAST(timestamp, 'String') AS string, - CAST(timestamp, 'FixedString(22)') AS fixed_string -``` - -``` text -┌─timestamp───────────┬────────────datetime─┬───────date─┬─string──────────────┬─fixed_string──────────────┐ -│ 2016-06-15 23:00:00 │ 2016-06-15 23:00:00 │ 2016-06-15 │ 2016-06-15 23:00:00 │ 2016-06-15 23:00:00\0\0\0 │ -└─────────────────────┴─────────────────────┴────────────┴─────────────────────┴───────────────────────────┘ -``` - -La Conversion en FixedString (N) ne fonctionne que pour les arguments de type String ou FixedString (N). - -Type conversion en [Nullable](../../sql-reference/data-types/nullable.md) et le dos est pris en charge. Exemple: - -``` sql -SELECT toTypeName(x) FROM t_null -``` - -``` text -┌─toTypeName(x)─┐ -│ Int8 │ -│ Int8 │ -└───────────────┘ -``` - -``` sql -SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null -``` - -``` text -┌─toTypeName(CAST(x, 'Nullable(UInt16)'))─┐ -│ Nullable(UInt16) │ -│ Nullable(UInt16) │ -└─────────────────────────────────────────┘ -``` - -## toInterval (année / trimestre / Mois / Semaine / Jour / Heure / Minute / Seconde) {#function-tointerval} - -Convertit un argument de type Number en [Intervalle](../../sql-reference/data-types/special-data-types/interval.md) type de données. - -**Syntaxe** - -``` sql -toIntervalSecond(number) -toIntervalMinute(number) -toIntervalHour(number) -toIntervalDay(number) -toIntervalWeek(number) -toIntervalMonth(number) -toIntervalQuarter(number) -toIntervalYear(number) -``` - -**Paramètre** - -- `number` — Duration of interval. Positive integer number. - -**Valeurs renvoyées** - -- La valeur de `Interval` type de données. - -**Exemple** - -``` sql -WITH - toDate('2019-01-01') AS date, - INTERVAL 1 WEEK AS interval_week, - toIntervalWeek(1) AS interval_to_week -SELECT - date + interval_week, - date + interval_to_week -``` - -``` text -┌─plus(date, interval_week)─┬─plus(date, interval_to_week)─┐ -│ 2019-01-08 │ 2019-01-08 │ -└───────────────────────────┴──────────────────────────────┘ -``` - -## parseDateTimeBestEffort {#parsedatetimebesteffort} - -Convertit une date et une heure dans le [Chaîne](../../sql-reference/data-types/string.md) la représentation de [DateTime](../../sql-reference/data-types/datetime.md#data_type-datetime) type de données. 
- -La fonction d'analyse [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601), [RFC 1123 - 5.2.14 RFC-822 date et heure Spécification](https://tools.ietf.org/html/rfc1123#page-55), ClickHouse et d'autres formats de date et d'heure. - -**Syntaxe** - -``` sql -parseDateTimeBestEffort(time_string [, time_zone]); -``` - -**Paramètre** - -- `time_string` — String containing a date and time to convert. [Chaîne](../../sql-reference/data-types/string.md). -- `time_zone` — Time zone. The function parses `time_string` selon le fuseau horaire. [Chaîne](../../sql-reference/data-types/string.md). - -**Formats non standard pris en charge** - -- Une chaîne contenant 9..10 chiffres [le timestamp unix](https://en.wikipedia.org/wiki/Unix_time). -- Une chaîne avec une date et une heure composant: `YYYYMMDDhhmmss`, `DD/MM/YYYY hh:mm:ss`, `DD-MM-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc. -- Une chaîne avec une date, mais pas de composant de temps: `YYYY`, `YYYYMM`, `YYYY*MM`, `DD/MM/YYYY`, `DD-MM-YY` etc. -- Une chaîne avec un jour et une heure: `DD`, `DD hh`, `DD hh:mm`. Dans ce cas `YYYY-MM` sont substitués comme suit `2000-01`. -- Une chaîne qui inclut la date et l'heure ainsi que des informations de décalage de fuseau horaire: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. Exemple, `2020-12-12 17:36:00 -5:00`. - -Pour tous les formats avec séparateur, la fonction analyse les noms de mois exprimés par leur nom complet ou par les trois premières lettres d'un nom de mois. Exemple: `24/DEC/18`, `24-Dec-18`, `01-September-2018`. - -**Valeur renvoyée** - -- `time_string` converti à l' `DateTime` type de données. - -**Exemple** - -Requête: - -``` sql -SELECT parseDateTimeBestEffort('12/12/2020 12:12:57') -AS parseDateTimeBestEffort; -``` - -Résultat: - -``` text -┌─parseDateTimeBestEffort─┐ -│ 2020-12-12 12:12:57 │ -└─────────────────────────┘ -``` - -Requête: - -``` sql -SELECT parseDateTimeBestEffort('Sat, 18 Aug 2018 07:22:16 GMT', 'Europe/Moscow') -AS parseDateTimeBestEffort -``` - -Résultat: - -``` text -┌─parseDateTimeBestEffort─┐ -│ 2018-08-18 10:22:16 │ -└─────────────────────────┘ -``` - -Requête: - -``` sql -SELECT parseDateTimeBestEffort('1284101485') -AS parseDateTimeBestEffort -``` - -Résultat: - -``` text -┌─parseDateTimeBestEffort─┐ -│ 2015-07-07 12:04:41 │ -└─────────────────────────┘ -``` - -Requête: - -``` sql -SELECT parseDateTimeBestEffort('2018-12-12 10:12:12') -AS parseDateTimeBestEffort -``` - -Résultat: - -``` text -┌─parseDateTimeBestEffort─┐ -│ 2018-12-12 10:12:12 │ -└─────────────────────────┘ -``` - -Requête: - -``` sql -SELECT parseDateTimeBestEffort('10 20:19') -``` - -Résultat: - -``` text -┌─parseDateTimeBestEffort('10 20:19')─┐ -│ 2000-01-10 20:19:00 │ -└─────────────────────────────────────┘ -``` - -**Voir Aussi** - -- \[Annonce ISO 8601 par @xkcd\](https://xkcd.com/1179/) -- [RFC 1123](https://tools.ietf.org/html/rfc1123) -- [jour](#todate) -- [toDateTime](#todatetime) - -## parseDateTimeBestEffortOrNull {#parsedatetimebesteffortornull} - -De même que pour [parseDateTimeBestEffort](#parsedatetimebesteffort) sauf qu'il renvoie null lorsqu'il rencontre un format de date qui ne peut pas être traité. - -## parseDateTimeBestEffortOrZero {#parsedatetimebesteffortorzero} - -De même que pour [parseDateTimeBestEffort](#parsedatetimebesteffort) sauf qu'il renvoie une date zéro ou une date zéro lorsqu'il rencontre un format de date qui ne peut pas être traité. 
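-
-A short illustration of the difference between the two fallback variants (hypothetical query and output, not part of the original reference): on an unparseable input, the first returns `NULL` and the second returns the zero date-time.
-
-``` sql
-SELECT
-    parseDateTimeBestEffortOrNull('notadate') AS or_null,
-    parseDateTimeBestEffortOrZero('notadate') AS or_zero
-```
-
-``` text
-┌─or_null─┬─────────────or_zero─┐
-│ ᴺᵁᴸᴸ    │ 1970-01-01 00:00:00 │
-└─────────┴─────────────────────┘
-```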
- -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/type_conversion_functions/) diff --git a/docs/fr/sql-reference/functions/url-functions.md b/docs/fr/sql-reference/functions/url-functions.md deleted file mode 100644 index 2bb2203a10b..00000000000 --- a/docs/fr/sql-reference/functions/url-functions.md +++ /dev/null @@ -1,209 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 54 -toc_title: Travailler avec des URL ---- - -# Fonctions pour travailler avec des URL {#functions-for-working-with-urls} - -Toutes ces fonctions ne suivent pas la RFC. Ils sont simplifiés au maximum pour améliorer les performances. - -## Fonctions qui extraient des parties d'une URL {#functions-that-extract-parts-of-a-url} - -Si la partie pertinente n'est pas présente dans une URL, une chaîne vide est renvoyée. - -### protocole {#protocol} - -Extrait le protocole d'une URL. - -Examples of typical returned values: http, https, ftp, mailto, tel, magnet… - -### domaine {#domain} - -Extrait le nom d'hôte d'une URL. - -``` sql -domain(url) -``` - -**Paramètre** - -- `url` — URL. Type: [Chaîne](../../sql-reference/data-types/string.md). - -L'URL peut être spécifiée avec ou sans schéma. Exemple: - -``` text -svn+ssh://some.svn-hosting.com:80/repo/trunk -some.svn-hosting.com:80/repo/trunk -https://yandex.com/time/ -``` - -Pour ces exemples, le `domain` la fonction renvoie les résultats suivants: - -``` text -some.svn-hosting.com -some.svn-hosting.com -yandex.com -``` - -**Valeurs renvoyées** - -- Nom d'hôte. Si ClickHouse peut analyser la chaîne d'entrée en tant QU'URL. -- Chaîne vide. Si ClickHouse ne peut pas analyser la chaîne d'entrée en tant QU'URL. - -Type: `String`. - -**Exemple** - -``` sql -SELECT domain('svn+ssh://some.svn-hosting.com:80/repo/trunk') -``` - -``` text -┌─domain('svn+ssh://some.svn-hosting.com:80/repo/trunk')─┐ -│ some.svn-hosting.com │ -└────────────────────────────────────────────────────────┘ -``` - -### domainWithoutWWW {#domainwithoutwww} - -Renvoie le domaine et ne supprime pas plus d'un ‘www.’ dès le début de celui-ci, si présent. - -### topLevelDomain {#topleveldomain} - -Extrait le domaine de premier niveau d'une URL. - -``` sql -topLevelDomain(url) -``` - -**Paramètre** - -- `url` — URL. Type: [Chaîne](../../sql-reference/data-types/string.md). - -L'URL peut être spécifiée avec ou sans schéma. Exemple: - -``` text -svn+ssh://some.svn-hosting.com:80/repo/trunk -some.svn-hosting.com:80/repo/trunk -https://yandex.com/time/ -``` - -**Valeurs renvoyées** - -- Nom de domaine. Si ClickHouse peut analyser la chaîne d'entrée en tant QU'URL. -- Chaîne vide. Si ClickHouse ne peut pas analyser la chaîne d'entrée en tant QU'URL. - -Type: `String`. - -**Exemple** - -``` sql -SELECT topLevelDomain('svn+ssh://www.some.svn-hosting.com:80/repo/trunk') -``` - -``` text -┌─topLevelDomain('svn+ssh://www.some.svn-hosting.com:80/repo/trunk')─┐ -│ com │ -└────────────────────────────────────────────────────────────────────┘ -``` - -### firstSignificantSubdomain {#firstsignificantsubdomain} - -Renvoie la “first significant subdomain”. C'est un concept non standard spécifique à Yandex.Metrica. Le premier sous-domaine significatif est un domaine de deuxième niveau s'il est ‘com’, ‘net’, ‘org’, ou ‘co’. Sinon, il est un domaine de troisième niveau. Exemple, `firstSignificantSubdomain (‘https://news.yandex.ru/’) = ‘yandex’, firstSignificantSubdomain (‘https://news.yandex.com.tr/’) = ‘yandex’`. 
La liste des “insignificant” les domaines de deuxième niveau et d'autres détails de mise en œuvre peuvent changer à l'avenir. - -### cutToFirstSignificantSubdomain {#cuttofirstsignificantsubdomain} - -Renvoie la partie du domaine qui inclut les sous-domaines de premier niveau “first significant subdomain” (voir l'explication ci-dessus). - -Exemple, `cutToFirstSignificantSubdomain('https://news.yandex.com.tr/') = 'yandex.com.tr'`. - -### chemin {#path} - -Retourne le chemin d'accès. Exemple: `/top/news.html` Le chemin n'inclut pas la chaîne de requête. - -### pathFull {#pathfull} - -La même chose que ci-dessus, mais y compris la chaîne de requête et le fragment. Exemple: / top / nouvelles.le html?page = 2 # commentaires - -### queryString {#querystring} - -Retourne la chaîne de requête. Exemple: page = 1 & lr=213. query-string n'inclut pas le point d'interrogation initial, ainsi que # et tout ce qui suit #. - -### fragment {#fragment} - -Renvoie l'identificateur de fragment. fragment n'inclut pas le symbole de hachage initial. - -### queryStringAndFragment {#querystringandfragment} - -Renvoie la chaîne de requête et l'Identificateur de fragment. Exemple: page = 1 # 29390. - -### extractURLParameter (URL, nom) {#extracturlparameterurl-name} - -Renvoie la valeur de la ‘name’ paramètre dans l'URL, le cas échéant. Sinon, une chaîne vide. S'il y a beaucoup de paramètres avec ce nom, il renvoie la première occurrence. Cette fonction fonctionne en supposant que le nom du paramètre est codé dans L'URL exactement de la même manière que dans l'argument passé. - -### extractURLParameters (URL) {#extracturlparametersurl} - -Renvoie un tableau de chaînes name = value correspondant aux paramètres D'URL. Les valeurs ne sont en aucun cas décodées. - -### extractURLParameterNames (URL) {#extracturlparameternamesurl} - -Retourne un tableau de chaînes de noms correspondant aux noms des paramètres d'URL. Les valeurs ne sont en aucun cas décodées. - -### URLHierarchy (URL) {#urlhierarchyurl} - -Retourne un tableau contenant L'URL, tronquée à la fin par les symboles /,? dans le chemin et la chaîne de requête. Les caractères séparateurs consécutifs sont comptés comme un. La coupe est faite dans la position après tous les caractères de séparation consécutifs. - -### URLPathHierarchy (URL) {#urlpathhierarchyurl} - -La même chose que ci-dessus, mais sans le protocole et l'hôte dans le résultat. Le / les élément (racine) n'est pas inclus. Exemple: la fonction est utilisée pour implémenter l'arborescence des rapports de L'URL dans Yandex. Métrique. - -``` text -URLPathHierarchy('https://example.com/browse/CONV-6788') = -[ - '/browse/', - '/browse/CONV-6788' -] -``` - -### decodeURLComponent (URL) {#decodeurlcomponenturl} - -Renvoie L'URL décodée. -Exemple: - -``` sql -SELECT decodeURLComponent('http://127.0.0.1:8123/?query=SELECT%201%3B') AS DecodedURL; -``` - -``` text -┌─DecodedURL─────────────────────────────┐ -│ http://127.0.0.1:8123/?query=SELECT 1; │ -└────────────────────────────────────────┘ -``` - -## Fonctions qui suppriment une partie D'une URL {#functions-that-remove-part-of-a-url} - -Si L'URL n'a rien de similaire, L'URL reste inchangée. - -### cutWWW {#cutwww} - -Supprime pas plus d'une ‘www.’ depuis le début du domaine de L'URL, s'il est présent. - -### cutQueryString {#cutquerystring} - -Supprime la chaîne de requête. Le point d'interrogation est également supprimé. - -### cutFragment {#cutfragment} - -Supprime l'identificateur de fragment. Le signe est également supprimé. 
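-
-A short illustration of these cutting functions (hypothetical query and output, not part of the original reference):
-
-``` sql
-SELECT
-    cutQueryString('http://example.com/page.html?page=1') AS no_query,
-    cutFragment('http://example.com/page.html#hash') AS no_fragment
-```
-
-``` text
-┌─no_query─────────────────────┬─no_fragment──────────────────┐
-│ http://example.com/page.html │ http://example.com/page.html │
-└──────────────────────────────┴──────────────────────────────┘
-```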
- -### couperystringandfragment {#cutquerystringandfragment} - -Supprime la chaîne de requête et l'Identificateur de fragment. Le point d'interrogation et le signe numérique sont également supprimés. - -### cutURLParameter (URL, nom) {#cuturlparameterurl-name} - -Supprime le ‘name’ Paramètre URL, si présent. Cette fonction fonctionne en supposant que le nom du paramètre est codé dans L'URL exactement de la même manière que dans l'argument passé. - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/url_functions/) diff --git a/docs/fr/sql-reference/functions/uuid-functions.md b/docs/fr/sql-reference/functions/uuid-functions.md deleted file mode 100644 index 9f9eb67d3e9..00000000000 --- a/docs/fr/sql-reference/functions/uuid-functions.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 53 -toc_title: Travailler avec UUID ---- - -# Fonctions pour travailler avec UUID {#functions-for-working-with-uuid} - -Les fonctions pour travailler avec UUID sont listées ci-dessous. - -## generateUUIDv4 {#uuid-function-generate} - -Génère le [UUID](../../sql-reference/data-types/uuid.md) de [la version 4](https://tools.ietf.org/html/rfc4122#section-4.4). - -``` sql -generateUUIDv4() -``` - -**Valeur renvoyée** - -La valeur de type UUID. - -**Exemple d'utilisation** - -Cet exemple montre la création d'une table avec la colonne de type UUID et l'insertion d'une valeur dans la table. - -``` sql -CREATE TABLE t_uuid (x UUID) ENGINE=TinyLog - -INSERT INTO t_uuid SELECT generateUUIDv4() - -SELECT * FROM t_uuid -``` - -``` text -┌────────────────────────────────────x─┐ -│ f4bf890f-f9dc-4332-ad5c-0c18e73f28e9 │ -└──────────────────────────────────────┘ -``` - -## toUUID (x) {#touuid-x} - -Convertit la valeur de type de chaîne en type UUID. - -``` sql -toUUID(String) -``` - -**Valeur renvoyée** - -La valeur de type UUID. - -**Exemple d'utilisation** - -``` sql -SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') AS uuid -``` - -``` text -┌─────────────────────────────────uuid─┐ -│ 61f0c404-5cb3-11e7-907b-a6006ad3dba0 │ -└──────────────────────────────────────┘ -``` - -## UUIDStringToNum {#uuidstringtonum} - -Accepte une chaîne contenant 36 caractères dans le format `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, et le renvoie comme un ensemble d'octets dans un [FixedString (16)](../../sql-reference/data-types/fixedstring.md). - -``` sql -UUIDStringToNum(String) -``` - -**Valeur renvoyée** - -FixedString (16) - -**Exemples d'utilisation** - -``` sql -SELECT - '612f3c40-5d3b-217e-707b-6a546a3d7b29' AS uuid, - UUIDStringToNum(uuid) AS bytes -``` - -``` text -┌─uuid─────────────────────────────────┬─bytes────────────┐ -│ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │ a/<@];!~p{jTj={) │ -└──────────────────────────────────────┴──────────────────┘ -``` - -## UUIDNumToString {#uuidnumtostring} - -Accepte un [FixedString (16)](../../sql-reference/data-types/fixedstring.md) valeur, et renvoie une chaîne contenant 36 caractères au format texte. - -``` sql -UUIDNumToString(FixedString(16)) -``` - -**Valeur renvoyée** - -Chaîne. 
- -**Exemple d'utilisation** - -``` sql -SELECT - 'a/<@];!~p{jTj={)' AS bytes, - UUIDNumToString(toFixedString(bytes, 16)) AS uuid -``` - -``` text -┌─bytes────────────┬─uuid─────────────────────────────────┐ -│ a/<@];!~p{jTj={) │ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │ -└──────────────────┴──────────────────────────────────────┘ -``` - -## Voir Aussi {#see-also} - -- [dictGetUUID](ext-dict-functions.md#ext_dict_functions-other) - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/uuid_function/) diff --git a/docs/fr/sql-reference/functions/ym-dict-functions.md b/docs/fr/sql-reference/functions/ym-dict-functions.md deleted file mode 100644 index f1e4461e24a..00000000000 --- a/docs/fr/sql-reference/functions/ym-dict-functions.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 59 -toc_title: Travailler avec Yandex.Dictionnaires Metrica ---- - -# Fonctions pour travailler avec Yandex.Dictionnaires Metrica {#functions-for-working-with-yandex-metrica-dictionaries} - -Pour que les fonctions ci-dessous fonctionnent, la configuration du serveur doit spécifier les chemins et les adresses pour obtenir tous les Yandex.Dictionnaires Metrica. Les dictionnaires sont chargés au premier appel de l'une de ces fonctions. Si les listes de référence ne peuvent pas être chargées, une exception est levée. - -Pour plus d'informations sur la création de listes de références, consultez la section “Dictionaries”. - -## Plusieurs Geobases {#multiple-geobases} - -ClickHouse soutient le travail avec plusieurs géobases alternatives (hiérarchies régionales) simultanément, afin de soutenir diverses perspectives sur les pays auxquels appartiennent certaines régions. - -Le ‘clickhouse-server’ config spécifie le fichier avec l'échelon régional::`/opt/geo/regions_hierarchy.txt` - -Outre ce fichier, il recherche également les fichiers à proximité qui ont le symbole _ et tout suffixe ajouté au nom (avant l'extension de fichier). -Par exemple, il trouvera également le fichier `/opt/geo/regions_hierarchy_ua.txt` si présente. - -`ua` est appelée la clé du dictionnaire. Pour un dictionnaire sans suffixe, la clé est une chaîne vide. - -Tous les dictionnaires sont rechargés dans l'exécution (une fois toutes les secondes, comme défini dans le paramètre de configuration builtin_dictionaries_reload_interval, ou une fois par heure par défaut). Cependant, la liste des dictionnaires disponibles est définie une fois, lorsque le serveur démarre. - -All functions for working with regions have an optional argument at the end – the dictionary key. It is referred to as the geobase. -Exemple: - -``` sql -regionToCountry(RegionID) – Uses the default dictionary: /opt/geo/regions_hierarchy.txt -regionToCountry(RegionID, '') – Uses the default dictionary: /opt/geo/regions_hierarchy.txt -regionToCountry(RegionID, 'ua') – Uses the dictionary for the 'ua' key: /opt/geo/regions_hierarchy_ua.txt -``` - -### regionToCity (id \[, geobase\]) {#regiontocityid-geobase} - -Accepts a UInt32 number – the region ID from the Yandex geobase. If this region is a city or part of a city, it returns the region ID for the appropriate city. Otherwise, returns 0. - -### regionToArea (id \[, geobase\]) {#regiontoareaid-geobase} - -Convertit une région en une zone (tapez 5 dans la géobase). Dans tous les autres cas, cette fonction est la même que ‘regionToCity’. 
- -``` sql -SELECT DISTINCT regionToName(regionToArea(toUInt32(number), 'ua')) -FROM system.numbers -LIMIT 15 -``` - -``` text -┌─regionToName(regionToArea(toUInt32(number), \'ua\'))─┐ -│ │ -│ Moscow and Moscow region │ -│ St. Petersburg and Leningrad region │ -│ Belgorod region │ -│ Ivanovsk region │ -│ Kaluga region │ -│ Kostroma region │ -│ Kursk region │ -│ Lipetsk region │ -│ Orlov region │ -│ Ryazan region │ -│ Smolensk region │ -│ Tambov region │ -│ Tver region │ -│ Tula region │ -└──────────────────────────────────────────────────────┘ -``` - -### regionToDistrict(id \[, geobase\]) {#regiontodistrictid-geobase} - -Convertit une région en district fédéral (type 4 dans la géobase). Dans tous les autres cas, cette fonction est la même que ‘regionToCity’. - -``` sql -SELECT DISTINCT regionToName(regionToDistrict(toUInt32(number), 'ua')) -FROM system.numbers -LIMIT 15 -``` - -``` text -┌─regionToName(regionToDistrict(toUInt32(number), \'ua\'))─┐ -│ │ -│ Central federal district │ -│ Northwest federal district │ -│ South federal district │ -│ North Caucases federal district │ -│ Privolga federal district │ -│ Ural federal district │ -│ Siberian federal district │ -│ Far East federal district │ -│ Scotland │ -│ Faroe Islands │ -│ Flemish region │ -│ Brussels capital region │ -│ Wallonia │ -│ Federation of Bosnia and Herzegovina │ -└──────────────────────────────────────────────────────────┘ -``` - -### regionToCountry (id \[, geobase\]) {#regiontocountryid-geobase} - -Convertit une région en un pays. Dans tous les autres cas, cette fonction est la même que ‘regionToCity’. -Exemple: `regionToCountry(toUInt32(213)) = 225` convertit Moscou (213) en Russie (225). - -### regionToContinent(id \[, géobase\]) {#regiontocontinentid-geobase} - -Convertit une région en continent. Dans tous les autres cas, cette fonction est la même que ‘regionToCity’. -Exemple: `regionToContinent(toUInt32(213)) = 10001` convertit Moscou (213) en Eurasie (10001). - -### regionToTopContinent (#regiontotopcontinent) {#regiontotopcontinent-regiontotopcontinent} - -Trouve le continent le plus élevé dans la hiérarchie de la région. - -**Syntaxe** - -``` sql -regionToTopContinent(id[, geobase]); -``` - -**Paramètre** - -- `id` — Region ID from the Yandex geobase. [UInt32](../../sql-reference/data-types/int-uint.md). -- `geobase` — Dictionary key. See [Plusieurs Geobases](#multiple-geobases). [Chaîne](../../sql-reference/data-types/string.md). Facultatif. - -**Valeur renvoyée** - -- Identifiant du continent de haut niveau (ce dernier lorsque vous grimpez dans la hiérarchie des régions). -- 0, si il n'y a aucun. - -Type: `UInt32`. - -### regionToPopulation (id \[, geobase\]) {#regiontopopulationid-geobase} - -Obtient la population d'une région. -La population peut être enregistrée dans des fichiers avec la géobase. Voir la section “External dictionaries”. -Si la population n'est pas enregistrée pour la région, elle renvoie 0. -Dans la géobase Yandex, la population peut être enregistrée pour les régions enfants, mais pas pour les régions parentes. - -### regionIn(lhs, rhs \[, géobase\]) {#regioninlhs-rhs-geobase} - -Vérifie si un ‘lhs’ région appartient à une ‘rhs’ région. Renvoie un nombre UInt8 égal à 1 s'il appartient, Ou 0 s'il n'appartient pas. -The relationship is reflexive – any region also belongs to itself. - -### regionHierarchy (id \[, geobase\]) {#regionhierarchyid-geobase} - -Accepts a UInt32 number – the region ID from the Yandex geobase. 
Returns an array of region IDs consisting of the passed region and all parents along the chain. -Exemple: `regionHierarchy(toUInt32(213)) = [213,1,3,225,10001,10000]`. - -### regionToName(id \[, lang\]) {#regiontonameid-lang} - -Accepts a UInt32 number – the region ID from the Yandex geobase. A string with the name of the language can be passed as a second argument. Supported languages are: ru, en, ua, uk, by, kz, tr. If the second argument is omitted, the language ‘ru’ is used. If the language is not supported, an exception is thrown. Returns a string – the name of the region in the corresponding language. If the region with the specified ID doesn't exist, an empty string is returned. - -`ua` et `uk` les deux signifient ukrainien. - -[Article Original](https://clickhouse.tech/docs/en/query_language/functions/ym_dict_functions/) diff --git a/docs/fr/sql-reference/index.md b/docs/fr/sql-reference/index.md deleted file mode 100644 index 04e44892c05..00000000000 --- a/docs/fr/sql-reference/index.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_folder_title: "R\xE9f\xE9rence SQL" -toc_hidden: true -toc_priority: 28 -toc_title: "cach\xE9s" ---- - -# Référence SQL {#sql-reference} - -ClickHouse prend en charge les types de requêtes suivants: - -- [SELECT](statements/select/index.md) -- [INSERT INTO](statements/insert-into.md) -- [CREATE](statements/create.md) -- [ALTER](statements/alter.md#query_language_queries_alter) -- [Autres types de requêtes](statements/misc.md) - -[Article Original](https://clickhouse.tech/docs/en/sql-reference/) diff --git a/docs/fr/sql-reference/operators/in.md b/docs/fr/sql-reference/operators/in.md deleted file mode 100644 index d87fe41a04f..00000000000 --- a/docs/fr/sql-reference/operators/in.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd ---- - -### Dans les opérateurs {#select-in-operators} - -Le `IN`, `NOT IN`, `GLOBAL IN`, et `GLOBAL NOT IN` les opérateurs sont traitées séparément, car leur fonctionnalité est assez riche. - -Le côté gauche de l'opérateur, soit une seule colonne ou un tuple. - -Exemple: - -``` sql -SELECT UserID IN (123, 456) FROM ... -SELECT (CounterID, UserID) IN ((34, 123), (101500, 456)) FROM ... -``` - -Si le côté gauche est une colonne unique qui est dans l'index, et le côté droit est un ensemble de constantes, le système utilise l'index pour le traitement de la requête. - -Don't list too many values explicitly (i.e. millions). If a data set is large, put it in a temporary table (for example, see the section “External data for query processing”), puis utiliser une sous-requête. - -Le côté droit de l'opérateur peut être un ensemble d'expressions constantes, un ensemble de tuples avec des expressions constantes (illustrées dans les exemples ci-dessus), ou le nom d'une table de base de données ou une sous-requête SELECT entre parenthèses. - -Si le côté droit de l'opérateur est le nom d'une table (par exemple, `UserID IN users`), ceci est équivalent à la sous-requête `UserID IN (SELECT * FROM users)`. Utilisez ceci lorsque vous travaillez avec des données externes envoyées avec la requête. Par exemple, la requête peut être envoyée avec un ensemble d'ID utilisateur chargés dans le ‘users’ table temporaire, qui doit être filtrée. 
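A minimal sketch of the temporary-table pattern just described, assuming a hypothetical `hits` table and a single session in which the temporary table exists:

``` sql
-- Load the identifiers to filter by into a temporary table first.
CREATE TEMPORARY TABLE users (UserID UInt64);
INSERT INTO users VALUES (123), (456);

-- Equivalent to: SELECT count() FROM hits WHERE UserID IN (SELECT * FROM users)
SELECT count() FROM hits WHERE UserID IN users;
```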
- -Si le côté droit de l'opérateur est un nom de table qui a le moteur Set (un ensemble de données préparé qui est toujours en RAM), l'ensemble de données ne sera pas créé à nouveau pour chaque requête. - -La sous-requête peut spécifier plusieurs colonnes pour filtrer les tuples. -Exemple: - -``` sql -SELECT (CounterID, UserID) IN (SELECT CounterID, UserID FROM ...) FROM ... -``` - -Les colonnes à gauche et à droite de l'opérateur doit avoir le même type. - -L'opérateur IN et la sous-requête peuvent se produire dans n'importe quelle partie de la requête, y compris dans les fonctions d'agrégation et les fonctions lambda. -Exemple: - -``` sql -SELECT - EventDate, - avg(UserID IN - ( - SELECT UserID - FROM test.hits - WHERE EventDate = toDate('2014-03-17') - )) AS ratio -FROM test.hits -GROUP BY EventDate -ORDER BY EventDate ASC -``` - -``` text -┌──EventDate─┬────ratio─┐ -│ 2014-03-17 │ 1 │ -│ 2014-03-18 │ 0.807696 │ -│ 2014-03-19 │ 0.755406 │ -│ 2014-03-20 │ 0.723218 │ -│ 2014-03-21 │ 0.697021 │ -│ 2014-03-22 │ 0.647851 │ -│ 2014-03-23 │ 0.648416 │ -└────────────┴──────────┘ -``` - -Pour chaque jour après le 17 mars, comptez le pourcentage de pages vues par les utilisateurs qui ont visité le site le 17 mars. -Une sous-requête dans la clause est toujours exécuter une seule fois sur un seul serveur. Il n'y a pas de sous-requêtes dépendantes. - -## Le Traitement NULL {#null-processing-1} - -Pendant le traitement de la demande, l'opérateur n'assume que le résultat d'une opération avec [NULL](../syntax.md#null-literal) est toujours égale à `0` indépendamment de savoir si `NULL` est sur le côté droit ou gauche de l'opérateur. `NULL` les valeurs ne sont incluses dans aucun jeu de données, ne correspondent pas entre elles et ne peuvent pas être comparées. - -Voici un exemple avec le `t_null` table: - -``` text -┌─x─┬────y─┐ -│ 1 │ ᴺᵁᴸᴸ │ -│ 2 │ 3 │ -└───┴──────┘ -``` - -L'exécution de la requête `SELECT x FROM t_null WHERE y IN (NULL,3)` vous donne le résultat suivant: - -``` text -┌─x─┐ -│ 2 │ -└───┘ -``` - -Vous pouvez voir que la ligne dans laquelle `y = NULL` est jeté hors de résultats de la requête. C'est parce que ClickHouse ne peut pas décider si `NULL` est inclus dans le `(NULL,3)` ensemble, les retours `0` comme le résultat de l'opération, et `SELECT` exclut cette ligne de la sortie finale. - -``` sql -SELECT y IN (NULL, 3) -FROM t_null -``` - -``` text -┌─in(y, tuple(NULL, 3))─┐ -│ 0 │ -│ 1 │ -└───────────────────────┘ -``` - -## Sous-Requêtes Distribuées {#select-distributed-subqueries} - -Il y a deux options pour IN-S avec des sous-requêtes (similaires aux jointures): normal `IN` / `JOIN` et `GLOBAL IN` / `GLOBAL JOIN`. Ils diffèrent dans la façon dont ils sont exécutés pour le traitement des requêtes distribuées. - -!!! attention "Attention" - Rappelez-vous que les algorithmes décrits ci-dessous peuvent travailler différemment en fonction de la [paramètre](../../operations/settings/settings.md) `distributed_product_mode` paramètre. - -Lors de l'utilisation de l'IN régulier, la requête est envoyée à des serveurs distants, et chacun d'eux exécute les sous-requêtes dans le `IN` ou `JOIN` clause. - -Lors de l'utilisation de `GLOBAL IN` / `GLOBAL JOINs`, d'abord toutes les sous-requêtes sont exécutées pour `GLOBAL IN` / `GLOBAL JOINs`, et les résultats sont recueillis dans des tableaux temporaires. Ensuite, les tables temporaires sont envoyés à chaque serveur distant, où les requêtes sont exécutées à l'aide temporaire de données. - -Pour une requête non distribuée, utilisez `IN` / `JOIN`. 
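One practical consequence of the `NULL` handling described above: if rows where the column is `NULL` should also be kept, the check has to be made explicit. For the `t_null` table from the previous subsection, a query along these lines returns both rows:

``` sql
SELECT x
FROM t_null
WHERE y IN (3) OR y IS NULL;
```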
- -Soyez prudent lorsque vous utilisez des sous-requêtes dans le `IN` / `JOIN` clauses pour le traitement des requêtes distribuées. - -Regardons quelques exemples. Supposons que chaque serveur du cluster a un **local_table**. Chaque serveur dispose également d'une **table distributed_table** table avec le **Distribué** type, qui regarde tous les serveurs du cluster. - -Pour une requête à l' **table distributed_table**, la requête sera envoyée à tous les serveurs distants et exécutée sur eux en utilisant le **local_table**. - -Par exemple, la requête - -``` sql -SELECT uniq(UserID) FROM distributed_table -``` - -sera envoyé à tous les serveurs distants - -``` sql -SELECT uniq(UserID) FROM local_table -``` - -et l'exécuter sur chacun d'eux en parallèle, jusqu'à ce qu'il atteigne le stade où les résultats intermédiaires peuvent être combinés. Ensuite, les résultats intermédiaires seront retournés au demandeur de serveur et de fusion, et le résultat final sera envoyé au client. - -Examinons maintenant une requête avec IN: - -``` sql -SELECT uniq(UserID) FROM distributed_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM local_table WHERE CounterID = 34) -``` - -- Calcul de l'intersection des audiences de deux sites. - -Cette requête sera envoyée à tous les serveurs distants - -``` sql -SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM local_table WHERE CounterID = 34) -``` - -En d'autres termes, l'ensemble de données de la clause IN sera collecté sur chaque serveur indépendamment, uniquement à travers les données stockées localement sur chacun des serveurs. - -Cela fonctionnera correctement et de manière optimale si vous êtes prêt pour ce cas et que vous avez réparti les données entre les serveurs de cluster de telle sorte que les données d'un seul ID utilisateur résident entièrement sur un seul serveur. Dans ce cas, toutes les données nécessaires seront disponibles localement sur chaque serveur. Sinon, le résultat sera erroné. Nous nous référons à cette variation de la requête que “local IN”. - -Pour corriger le fonctionnement de la requête lorsque les données sont réparties aléatoirement sur les serveurs de cluster, vous pouvez spécifier **table distributed_table** à l'intérieur d'une sous-requête. La requête ressemblerait à ceci: - -``` sql -SELECT uniq(UserID) FROM distributed_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM distributed_table WHERE CounterID = 34) -``` - -Cette requête sera envoyée à tous les serveurs distants - -``` sql -SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM distributed_table WHERE CounterID = 34) -``` - -La sous-requête commencera à s'exécuter sur chaque serveur distant. Étant donné que la sous-requête utilise une table distribuée, la sous-requête qui se trouve sur chaque serveur distant sera renvoyée à chaque serveur distant comme - -``` sql -SELECT UserID FROM local_table WHERE CounterID = 34 -``` - -Par exemple, si vous avez un cluster de 100 SERVEURS, l'exécution de la requête entière nécessitera 10 000 requêtes élémentaires, ce qui est généralement considéré comme inacceptable. - -Dans de tels cas, vous devez toujours utiliser GLOBAL IN au lieu de IN. 
Voyons comment cela fonctionne pour la requête - -``` sql -SELECT uniq(UserID) FROM distributed_table WHERE CounterID = 101500 AND UserID GLOBAL IN (SELECT UserID FROM distributed_table WHERE CounterID = 34) -``` - -Le serveur demandeur exécutera la sous requête - -``` sql -SELECT UserID FROM distributed_table WHERE CounterID = 34 -``` - -et le résultat sera mis dans une table temporaire en RAM. Ensuite, la demande sera envoyée à chaque serveur distant - -``` sql -SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID GLOBAL IN _data1 -``` - -et la table temporaire `_data1` sera envoyé à chaque serveur distant avec la requête (le nom de la table temporaire est défini par l'implémentation). - -Ceci est plus optimal que d'utiliser la normale dans. Cependant, gardez les points suivants à l'esprit: - -1. Lors de la création d'une table temporaire, les données ne sont pas uniques. Pour réduire le volume de données transmises sur le réseau, spécifiez DISTINCT dans la sous-requête. (Vous n'avez pas besoin de le faire pour un IN normal.) -2. La table temporaire sera envoyé à tous les serveurs distants. La Transmission ne tient pas compte de la topologie du réseau. Par exemple, si 10 serveurs distants résident dans un centre de données très distant par rapport au serveur demandeur, les données seront envoyées 10 fois sur le canal au centre de données distant. Essayez d'éviter les grands ensembles de données lorsque vous utilisez GLOBAL IN. -3. Lors de la transmission de données à des serveurs distants, les restrictions sur la bande passante réseau ne sont pas configurables. Vous pourriez surcharger le réseau. -4. Essayez de distribuer les données entre les serveurs afin que vous n'ayez pas besoin D'utiliser GLOBAL IN sur une base régulière. -5. Si vous devez utiliser GLOBAL in souvent, planifiez l'emplacement du cluster ClickHouse de sorte qu'un seul groupe de répliques ne réside pas dans plus d'un centre de données avec un réseau rapide entre eux, de sorte qu'une requête puisse être traitée entièrement dans un seul centre de données. - -Il est également judicieux de spécifier une table locale dans le `GLOBAL IN` clause, dans le cas où cette table locale est uniquement disponible sur le serveur demandeur et que vous souhaitez utiliser les données de celui-ci sur des serveurs distants. diff --git a/docs/fr/sql-reference/operators/index.md b/docs/fr/sql-reference/operators/index.md deleted file mode 100644 index 1635c7eece3..00000000000 --- a/docs/fr/sql-reference/operators/index.md +++ /dev/null @@ -1,277 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 37 -toc_title: "Op\xE9rateur" ---- - -# Opérateur {#operators} - -ClickHouse transforme les opérateurs en leurs fonctions correspondantes à l'étape d'analyse des requêtes en fonction de leur priorité, de leur priorité et de leur associativité. - -## Des Opérateurs D'Accès {#access-operators} - -`a[N]` – Access to an element of an array. The `arrayElement(a, N)` fonction. - -`a.N` – Access to a tuple element. The `tupleElement(a, N)` fonction. - -## Opérateur De Négation Numérique {#numeric-negation-operator} - -`-a` – The `negate (a)` fonction. - -## Opérateurs de Multiplication et de Division {#multiplication-and-division-operators} - -`a * b` – The `multiply (a, b)` fonction. - -`a / b` – The `divide(a, b)` fonction. - -`a % b` – The `modulo(a, b)` fonction. 
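A small illustrative query showing these operators next to the values they produce; note that `/` maps to `divide`, which performs floating-point division even for integer arguments (`intDiv` exists for integer division):

``` sql
SELECT
    10 * 3 AS product,    -- multiply(10, 3) -> 30
    10 / 4 AS quotient,   -- divide(10, 4)   -> 2.5 (floating point)
    10 % 4 AS remainder;  -- modulo(10, 4)   -> 2
```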
- -## Opérateurs d'Addition et de soustraction {#addition-and-subtraction-operators} - -`a + b` – The `plus(a, b)` fonction. - -`a - b` – The `minus(a, b)` fonction. - -## Opérateurs De Comparaison {#comparison-operators} - -`a = b` – The `equals(a, b)` fonction. - -`a == b` – The `equals(a, b)` fonction. - -`a != b` – The `notEquals(a, b)` fonction. - -`a <> b` – The `notEquals(a, b)` fonction. - -`a <= b` – The `lessOrEquals(a, b)` fonction. - -`a >= b` – The `greaterOrEquals(a, b)` fonction. - -`a < b` – The `less(a, b)` fonction. - -`a > b` – The `greater(a, b)` fonction. - -`a LIKE s` – The `like(a, b)` fonction. - -`a NOT LIKE s` – The `notLike(a, b)` fonction. - -`a BETWEEN b AND c` – The same as `a >= b AND a <= c`. - -`a NOT BETWEEN b AND c` – The same as `a < b OR a > c`. - -## Opérateurs pour travailler avec des ensembles de données {#operators-for-working-with-data-sets} - -*Voir [Dans les opérateurs](in.md).* - -`a IN ...` – The `in(a, b)` fonction. - -`a NOT IN ...` – The `notIn(a, b)` fonction. - -`a GLOBAL IN ...` – The `globalIn(a, b)` fonction. - -`a GLOBAL NOT IN ...` – The `globalNotIn(a, b)` fonction. - -## Opérateurs pour travailler avec des Dates et des heures {#operators-datetime} - -### EXTRACT {#operator-extract} - -``` sql -EXTRACT(part FROM date); -``` - -Extraire des parties d'une date donnée. Par exemple, vous pouvez récupérer un mois à partir d'une date donnée, ou d'une seconde à partir d'un moment. - -Le `part` paramètre spécifie la partie de la date à récupérer. Les valeurs suivantes sont disponibles: - -- `DAY` — The day of the month. Possible values: 1–31. -- `MONTH` — The number of a month. Possible values: 1–12. -- `YEAR` — The year. -- `SECOND` — The second. Possible values: 0–59. -- `MINUTE` — The minute. Possible values: 0–59. -- `HOUR` — The hour. Possible values: 0–23. - -Le `part` le paramètre est insensible à la casse. - -Le `date` paramètre spécifie la date ou l'heure à traiter. Soit [Date](../../sql-reference/data-types/date.md) ou [DateTime](../../sql-reference/data-types/datetime.md) le type est pris en charge. - -Exemple: - -``` sql -SELECT EXTRACT(DAY FROM toDate('2017-06-15')); -SELECT EXTRACT(MONTH FROM toDate('2017-06-15')); -SELECT EXTRACT(YEAR FROM toDate('2017-06-15')); -``` - -Dans l'exemple suivant, nous créons un tableau et de les insérer dans une valeur avec le `DateTime` type. - -``` sql -CREATE TABLE test.Orders -( - OrderId UInt64, - OrderName String, - OrderDate DateTime -) -ENGINE = Log; -``` - -``` sql -INSERT INTO test.Orders VALUES (1, 'Jarlsberg Cheese', toDateTime('2008-10-11 13:23:44')); -``` - -``` sql -SELECT - toYear(OrderDate) AS OrderYear, - toMonth(OrderDate) AS OrderMonth, - toDayOfMonth(OrderDate) AS OrderDay, - toHour(OrderDate) AS OrderHour, - toMinute(OrderDate) AS OrderMinute, - toSecond(OrderDate) AS OrderSecond -FROM test.Orders; -``` - -``` text -┌─OrderYear─┬─OrderMonth─┬─OrderDay─┬─OrderHour─┬─OrderMinute─┬─OrderSecond─┐ -│ 2008 │ 10 │ 11 │ 13 │ 23 │ 44 │ -└───────────┴────────────┴──────────┴───────────┴─────────────┴─────────────┘ -``` - -Vous pouvez voir plus d'exemples de [test](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00619_extract.sql). - -### INTERVAL {#operator-interval} - -Crée un [Intervalle](../../sql-reference/data-types/special-data-types/interval.md)- valeur de type qui doit être utilisée dans les opérations arithmétiques avec [Date](../../sql-reference/data-types/date.md) et [DateTime](../../sql-reference/data-types/datetime.md)-type de valeurs. 
- -Types d'intervalles: -- `SECOND` -- `MINUTE` -- `HOUR` -- `DAY` -- `WEEK` -- `MONTH` -- `QUARTER` -- `YEAR` - -!!! warning "Avertissement" - Les intervalles avec différents types ne peuvent pas être combinés. Vous ne pouvez pas utiliser des expressions comme `INTERVAL 4 DAY 1 HOUR`. Spécifiez des intervalles en unités inférieures ou égales à la plus petite unité de l'intervalle, par exemple, `INTERVAL 25 HOUR`. Vous pouvez utiliser les opérations consécutives, comme dans l'exemple ci-dessous. - -Exemple: - -``` sql -SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR -``` - -``` text -┌───current_date_time─┬─plus(plus(now(), toIntervalDay(4)), toIntervalHour(3))─┐ -│ 2019-10-23 11:16:28 │ 2019-10-27 14:16:28 │ -└─────────────────────┴────────────────────────────────────────────────────────┘ -``` - -**Voir Aussi** - -- [Intervalle](../../sql-reference/data-types/special-data-types/interval.md) type de données -- [toInterval](../../sql-reference/functions/type-conversion-functions.md#function-tointerval) type fonctions de conversion - -## Opérateur De Négation Logique {#logical-negation-operator} - -`NOT a` – The `not(a)` fonction. - -## Logique ET de l'Opérateur {#logical-and-operator} - -`a AND b` – The`and(a, b)` fonction. - -## Logique ou opérateur {#logical-or-operator} - -`a OR b` – The `or(a, b)` fonction. - -## Opérateur Conditionnel {#conditional-operator} - -`a ? b : c` – The `if(a, b, c)` fonction. - -Note: - -L'opérateur conditionnel calcule les valeurs de b et c, puis vérifie si la condition a est remplie, puis renvoie la valeur correspondante. Si `b` ou `C` est un [arrayJoin()](../../sql-reference/functions/array-join.md#functions_arrayjoin) fonction, chaque ligne sera répliquée indépendamment de la “a” condition. - -## Expression Conditionnelle {#operator_case} - -``` sql -CASE [x] - WHEN a THEN b - [WHEN ... THEN ...] - [ELSE c] -END -``` - -Si `x` est spécifié, alors `transform(x, [a, ...], [b, ...], c)` function is used. Otherwise – `multiIf(a, b, ..., c)`. - -Si il n'y a pas de `ELSE c` dans l'expression, la valeur par défaut est `NULL`. - -Le `transform` la fonction ne fonctionne pas avec `NULL`. - -## Opérateur De Concaténation {#concatenation-operator} - -`s1 || s2` – The `concat(s1, s2) function.` - -## Opérateur De Création Lambda {#lambda-creation-operator} - -`x -> expr` – The `lambda(x, expr) function.` - -Les opérateurs suivants n'ont pas de priorité puisqu'ils sont des parenthèses: - -## Opérateur De Création De Tableau {#array-creation-operator} - -`[x1, ...]` – The `array(x1, ...) function.` - -## Opérateur De Création De Tuple {#tuple-creation-operator} - -`(x1, x2, ...)` – The `tuple(x2, x2, ...) function.` - -## Associativité {#associativity} - -Tous les opérateurs binaires ont associativité gauche. Exemple, `1 + 2 + 3` est transformé à `plus(plus(1, 2), 3)`. -Parfois, cela ne fonctionne pas de la façon que vous attendez. Exemple, `SELECT 4 > 2 > 3` résultat sera 0. - -Pour l'efficacité, le `and` et `or` les fonctions acceptent n'importe quel nombre d'arguments. Les chaînes de `AND` et `OR` les opérateurs se sont transformés en un seul appel de ces fonctions. - -## La vérification de `NULL` {#checking-for-null} - -Clickhouse soutient le `IS NULL` et `IS NOT NULL` opérateur. - -### IS NULL {#operator-is-null} - -- Pour [Nullable](../../sql-reference/data-types/nullable.md) type de valeurs, l' `IS NULL` opérateur retourne: - - `1` si la valeur est `NULL`. - - `0` autrement. 
-- Pour les autres valeurs, la `IS NULL` l'opérateur renvoie toujours `0`. - - - -``` sql -SELECT x+100 FROM t_null WHERE y IS NULL -``` - -``` text -┌─plus(x, 100)─┐ -│ 101 │ -└──────────────┘ -``` - -### IS NOT NULL {#is-not-null} - -- Pour [Nullable](../../sql-reference/data-types/nullable.md) type de valeurs, l' `IS NOT NULL` opérateur retourne: - - `0` si la valeur est `NULL`. - - `1` autrement. -- Pour les autres valeurs, la `IS NOT NULL` l'opérateur renvoie toujours `1`. - - - -``` sql -SELECT * FROM t_null WHERE y IS NOT NULL -``` - -``` text -┌─x─┬─y─┐ -│ 2 │ 3 │ -└───┴───┘ -``` - -[Article Original](https://clickhouse.tech/docs/en/query_language/operators/) diff --git a/docs/fr/sql-reference/statements/alter.md b/docs/fr/sql-reference/statements/alter.md deleted file mode 100644 index 64fe21046a3..00000000000 --- a/docs/fr/sql-reference/statements/alter.md +++ /dev/null @@ -1,602 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 36 -toc_title: ALTER ---- - -## ALTER {#query_language_queries_alter} - -Le `ALTER` la requête est prise en charge uniquement pour `*MergeTree` des tables, ainsi que `Merge`et`Distributed`. La requête a plusieurs variantes. - -### Manipulations De Colonne {#column-manipulations} - -Modification de la structure de la table. - -``` sql -ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN ... -``` - -Dans la requête, spécifiez une liste d'une ou plusieurs actions séparées par des virgules. -Chaque action est une opération sur une colonne. - -Les actions suivantes sont prises en charge: - -- [ADD COLUMN](#alter_add-column) — Adds a new column to the table. -- [DROP COLUMN](#alter_drop-column) — Deletes the column. -- [CLEAR COLUMN](#alter_clear-column) — Resets column values. -- [COMMENT COLUMN](#alter_comment-column) — Adds a text comment to the column. -- [MODIFY COLUMN](#alter_modify-column) — Changes column's type, default expression and TTL. - -Ces actions sont décrites en détail ci-dessous. - -#### ADD COLUMN {#alter_add-column} - -``` sql -ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after] -``` - -Ajoute une nouvelle colonne à la table spécifiée `name`, `type`, [`codec`](create.md#codecs) et `default_expr` (voir la section [Expressions par défaut](create.md#create-default-values)). - -Si l' `IF NOT EXISTS` la clause est incluse, la requête ne retournera pas d'erreur si la colonne existe déjà. Si vous spécifiez `AFTER name_after` (le nom d'une autre colonne), la colonne est ajoutée après celle spécifiée dans la liste des colonnes de la table. Sinon, la colonne est ajoutée à la fin de la table. Notez qu'il n'existe aucun moyen d'ajouter une colonne au début d'un tableau. Pour une chaîne d'actions, `name_after` peut être le nom d'une colonne est ajoutée dans l'une des actions précédentes. - -L'ajout d'une colonne modifie simplement la structure de la table, sans effectuer d'actions avec des données. Les données n'apparaissent pas sur le disque après la `ALTER`. Si les données sont manquantes pour une colonne lors de la lecture de la table, elles sont remplies avec des valeurs par défaut (en exécutant l'expression par défaut s'il y en a une, ou en utilisant des zéros ou des chaînes vides). La colonne apparaît sur le disque après la fusion des parties de données (voir [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md)). 
- -Cette approche nous permet de compléter le `ALTER` requête instantanément, sans augmenter le volume de données anciennes. - -Exemple: - -``` sql -ALTER TABLE visits ADD COLUMN browser String AFTER user_id -``` - -#### DROP COLUMN {#alter_drop-column} - -``` sql -DROP COLUMN [IF EXISTS] name -``` - -Supprime la colonne avec le nom `name`. Si l' `IF EXISTS` la clause est spécifiée, la requête ne retournera pas d'erreur si la colonne n'existe pas. - -Supprime les données du système de fichiers. Comme cela supprime des fichiers entiers, la requête est terminée presque instantanément. - -Exemple: - -``` sql -ALTER TABLE visits DROP COLUMN browser -``` - -#### CLEAR COLUMN {#alter_clear-column} - -``` sql -CLEAR COLUMN [IF EXISTS] name IN PARTITION partition_name -``` - -Réinitialise toutes les données dans une colonne pour une partition spécifiée. En savoir plus sur la définition du nom de la partition dans la section [Comment spécifier l'expression de partition](#alter-how-to-specify-part-expr). - -Si l' `IF EXISTS` la clause est spécifiée, la requête ne retournera pas d'erreur si la colonne n'existe pas. - -Exemple: - -``` sql -ALTER TABLE visits CLEAR COLUMN browser IN PARTITION tuple() -``` - -#### COMMENT COLUMN {#alter_comment-column} - -``` sql -COMMENT COLUMN [IF EXISTS] name 'comment' -``` - -Ajoute un commentaire à la colonne. Si l' `IF EXISTS` la clause est spécifiée, la requête ne retournera pas d'erreur si la colonne n'existe pas. - -Chaque colonne peut avoir un commentaire. Si un commentaire existe déjà pour la colonne, un nouveau commentaire remplace le précédent commentaire. - -Les commentaires sont stockés dans le `comment_expression` colonne renvoyée par le [DESCRIBE TABLE](misc.md#misc-describe-table) requête. - -Exemple: - -``` sql -ALTER TABLE visits COMMENT COLUMN browser 'The table shows the browser used for accessing the site.' -``` - -#### MODIFY COLUMN {#alter_modify-column} - -``` sql -MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [TTL] -``` - -Cette requête modifie le `name` les propriétés de la colonne: - -- Type - -- Expression par défaut - -- TTL - - For examples of columns TTL modifying, see [Column TTL](../engines/table_engines/mergetree_family/mergetree.md#mergetree-column-ttl). - -Si l' `IF EXISTS` la clause est spécifiée, la requête ne retournera pas d'erreur si la colonne n'existe pas. - -Lors de la modification du type, les valeurs sont converties comme si [toType](../../sql-reference/functions/type-conversion-functions.md) les fonctions ont été appliquées. Si seule l'expression par défaut est modifiée, la requête ne fait rien de complexe et est terminée presque instantanément. - -Exemple: - -``` sql -ALTER TABLE visits MODIFY COLUMN browser Array(String) -``` - -Changing the column type is the only complex action – it changes the contents of files with data. For large tables, this may take a long time. - -Il y a plusieurs étapes de traitement: - -- Préparation de (nouveaux) fichiers temporaires avec des données modifiées. -- Renommer les anciens fichiers. -- Renommer les (nouveaux) fichiers temporaires en anciens noms. -- Suppression des anciens fichiers. - -Seule la première étape prend du temps. Si il y a un échec à ce stade, les données ne sont pas modifiées. -En cas d'échec au cours d'une des étapes successives, les données peuvent être restaurées manuellement. L'exception est si les anciens fichiers ont été supprimés du système de fichiers mais que les données des nouveaux fichiers n'ont pas été écrites sur le disque et ont été perdues. 
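Because a single `ALTER` query accepts a comma-separated list of actions, several of the column operations above can be combined into one statement. A sketch reusing the `visits` table from the examples (the `duration` column is hypothetical):

``` sql
ALTER TABLE visits
    -- Add a new column only if it is not already there.
    ADD COLUMN IF NOT EXISTS duration UInt32 DEFAULT 0 AFTER browser,
    -- Attach a comment to an existing column in the same query.
    COMMENT COLUMN browser 'Browser used for accessing the site.';
```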
- -Le `ALTER` la requête de modification des colonnes est répliquée. Les instructions sont enregistrées dans ZooKeeper, puis chaque réplique les applique. Tout `ALTER` les requêtes sont exécutées dans le même ordre. La requête attend que les actions appropriées soient terminées sur les autres répliques. Cependant, une requête pour modifier des colonnes dans une table répliquée peut être interrompue, et toutes les actions seront effectuées de manière asynchrone. - -#### Modifier les limites de la requête {#alter-query-limitations} - -Le `ALTER` query vous permet de créer et de supprimer des éléments distincts (colonnes) dans des structures de données imbriquées, mais pas des structures de données imbriquées entières. Pour ajouter une structure de données imbriquée, vous pouvez ajouter des colonnes avec un nom comme `name.nested_name` et le type `Array(T)`. Une structure de données imbriquée est équivalente à plusieurs colonnes de tableau avec un nom qui a le même préfixe avant le point. - -Il n'y a pas de support pour supprimer des colonnes dans la clé primaire ou la clé d'échantillonnage (colonnes qui sont utilisées dans le `ENGINE` expression). La modification du type des colonnes incluses dans la clé primaire n'est possible que si cette modification n'entraîne pas la modification des données (par exemple, vous êtes autorisé à ajouter des valeurs à une énumération ou à modifier un type de `DateTime` de `UInt32`). - -Si l' `ALTER` la requête n'est pas suffisante pour apporter les modifications de table dont vous avez besoin, vous pouvez créer une nouvelle table, y copier les données en utilisant le [INSERT SELECT](insert-into.md#insert_query_insert-select) requête, puis changer les tables en utilisant le [RENAME](misc.md#misc_operations-rename) requête et supprimer l'ancienne table. Vous pouvez utiliser l' [clickhouse-copieur](../../operations/utilities/clickhouse-copier.md) comme une alternative à la `INSERT SELECT` requête. - -Le `ALTER` query bloque toutes les lectures et écritures pour la table. En d'autres termes, si une longue `SELECT` est en cours d'exécution au moment de la `ALTER` requête, la `ALTER` la requête va attendre qu'elle se termine. Dans le même temps, toutes les nouvelles requêtes à la même table attendre que ce `ALTER` est en cours d'exécution. - -Pour les tables qui ne stockent pas les données elles-mêmes (telles que `Merge` et `Distributed`), `ALTER` change simplement la structure de la table, et ne change pas la structure des tables subordonnées. Par exemple, lors de L'exécution de ALTER pour un `Distributed` table, vous devrez également exécuter `ALTER` pour les tables sur tous les serveurs distants. - -### Manipulations avec des Expressions clés {#manipulations-with-key-expressions} - -La commande suivante est prise en charge: - -``` sql -MODIFY ORDER BY new_expression -``` - -Cela ne fonctionne que pour les tables du [`MergeTree`](../../engines/table-engines/mergetree-family/mergetree.md) de la famille (y compris les -[répliqué](../../engines/table-engines/mergetree-family/replication.md) table). La commande change l' -[clé de tri](../../engines/table-engines/mergetree-family/mergetree.md) de la table -de `new_expression` (une expression ou un tuple d'expressions). Clé primaire reste le même. - -La commande est légère en ce sens qu'elle ne modifie que les métadonnées. 
Pour conserver la propriété cette partie de données -les lignes sont ordonnées par l'expression de clé de tri vous ne pouvez pas ajouter d'expressions contenant des colonnes existantes -à la clé de tri (seules les colonnes ajoutées par `ADD COLUMN` commande dans le même `ALTER` requête). - -### Manipulations avec des Indices de saut de données {#manipulations-with-data-skipping-indices} - -Cela ne fonctionne que pour les tables du [`*MergeTree`](../../engines/table-engines/mergetree-family/mergetree.md) de la famille (y compris les -[répliqué](../../engines/table-engines/mergetree-family/replication.md) table). Les opérations suivantes -sont disponibles: - -- `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value AFTER name [AFTER name2]` - Ajoute la description de l'index aux métadonnées des tables. - -- `ALTER TABLE [db].name DROP INDEX name` - Supprime la description de l'index des métadonnées des tables et supprime les fichiers d'index du disque. - -Ces commandes sont légères dans le sens où elles ne modifient que les métadonnées ou suppriment des fichiers. -En outre, ils sont répliqués (synchronisation des métadonnées des indices via ZooKeeper). - -### Manipulations avec contraintes {#manipulations-with-constraints} - -En voir plus sur [contraintes](create.md#constraints) - -Les contraintes peuvent être ajoutées ou supprimées à l'aide de la syntaxe suivante: - -``` sql -ALTER TABLE [db].name ADD CONSTRAINT constraint_name CHECK expression; -ALTER TABLE [db].name DROP CONSTRAINT constraint_name; -``` - -Les requêtes ajouteront ou supprimeront des métadonnées sur les contraintes de la table afin qu'elles soient traitées immédiatement. - -Contrainte de vérifier *ne sera pas exécuté* sur les données existantes si elle a été ajoutée. - -Toutes les modifications sur les tables répliquées sont diffusées sur ZooKeeper et seront donc appliquées sur d'autres répliques. - -### Manipulations avec des Partitions et des pièces {#alter_manipulations-with-partitions} - -Les opérations suivantes avec [partition](../../engines/table-engines/mergetree-family/custom-partitioning-key.md) sont disponibles: - -- [DETACH PARTITION](#alter_detach-partition) – Moves a partition to the `detached` répertoire et de l'oublier. -- [DROP PARTITION](#alter_drop-partition) – Deletes a partition. -- [ATTACH PART\|PARTITION](#alter_attach-partition) – Adds a part or partition from the `detached` répertoire à la table. -- [ATTACH PARTITION FROM](#alter_attach-partition-from) – Copies the data partition from one table to another and adds. -- [REPLACE PARTITION](#alter_replace-partition) - Copie la partition de données d'une table à l'autre et la remplace. -- [MOVE PARTITION TO TABLE](#alter_move_to_table-partition)(#alter_move_to_table-partition) - déplace la partition de données d'une table à l'autre. -- [CLEAR COLUMN IN PARTITION](#alter_clear-column-partition) - Rétablit la valeur d'une colonne spécifiée dans une partition. -- [CLEAR INDEX IN PARTITION](#alter_clear-index-partition) - Réinitialise l'index secondaire spécifié dans une partition. -- [FREEZE PARTITION](#alter_freeze-partition) – Creates a backup of a partition. -- [FETCH PARTITION](#alter_fetch-partition) – Downloads a partition from another server. -- [MOVE PARTITION\|PART](#alter_move-partition) – Move partition/data part to another disk or volume. 
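To make the constraint syntax from the section above concrete, here is a hypothetical round trip that adds a `CHECK` constraint and then removes it (table, column, and constraint names are illustrative; recall that existing data is not validated when a constraint is added):

``` sql
ALTER TABLE visits ADD CONSTRAINT c_non_empty_browser CHECK browser != '';
ALTER TABLE visits DROP CONSTRAINT c_non_empty_browser;
```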
- - - -#### DETACH PARTITION {#alter_detach-partition} - -``` sql -ALTER TABLE table_name DETACH PARTITION partition_expr -``` - -Déplace toutes les données de la partition spécifiée vers `detached` répertoire. Le serveur oublie la partition de données détachée comme si elle n'existait pas. Le serveur ne connaîtra pas ces données tant que vous n'aurez pas [ATTACH](#alter_attach-partition) requête. - -Exemple: - -``` sql -ALTER TABLE visits DETACH PARTITION 201901 -``` - -Lisez à propos de la définition de l'expression de partition dans une section [Comment spécifier l'expression de partition](#alter-how-to-specify-part-expr). - -Une fois la requête exécutée, vous pouvez faire ce que vous voulez avec les données du `detached` directory — delete it from the file system, or just leave it. - -This query is replicated – it moves the data to the `detached` répertoire sur toutes les répliques. Notez que vous ne pouvez exécuter cette requête que sur un réplica leader. Pour savoir si une réplique est un leader, effectuez le `SELECT` requête à l' [système.réplique](../../operations/system-tables.md#system_tables-replicas) table. Alternativement, il est plus facile de faire une `DETACH` requête sur toutes les répliques - toutes les répliques lancent une exception, à l'exception de la réplique leader. - -#### DROP PARTITION {#alter_drop-partition} - -``` sql -ALTER TABLE table_name DROP PARTITION partition_expr -``` - -Supprime la partition spécifiée de la table. Cette requête marque la partition comme inactive et supprime complètement les données, environ en 10 minutes. - -Lisez à propos de la définition de l'expression de partition dans une section [Comment spécifier l'expression de partition](#alter-how-to-specify-part-expr). - -The query is replicated – it deletes data on all replicas. - -#### DROP DETACHED PARTITION\|PART {#alter_drop-detached} - -``` sql -ALTER TABLE table_name DROP DETACHED PARTITION|PART partition_expr -``` - -Supprime la partie spécifiée ou toutes les parties de la partition spécifiée de `detached`. -En savoir plus sur la définition de l'expression de partition dans une section [Comment spécifier l'expression de partition](#alter-how-to-specify-part-expr). - -#### ATTACH PARTITION\|PART {#alter_attach-partition} - -``` sql -ALTER TABLE table_name ATTACH PARTITION|PART partition_expr -``` - -Ajoute des données à la table à partir du `detached` répertoire. Il est possible d'ajouter des données dans une partition entière ou pour une partie distincte. Exemple: - -``` sql -ALTER TABLE visits ATTACH PARTITION 201901; -ALTER TABLE visits ATTACH PART 201901_2_2_0; -``` - -En savoir plus sur la définition de l'expression de partition dans une section [Comment spécifier l'expression de partition](#alter-how-to-specify-part-expr). - -Cette requête est répliquée. L'initiateur de réplica vérifie s'il y a des données dans le `detached` répertoire. Si des données existent, la requête vérifie son intégrité. Si tout est correct, la requête ajoute les données à la table. Tous les autres réplicas téléchargent les données de l'initiateur de réplica. - -Ainsi, vous pouvez mettre des données à la `detached` répertoire sur une réplique, et utilisez le `ALTER ... ATTACH` requête pour l'ajouter à la table sur tous les réplicas. - -#### ATTACH PARTITION FROM {#alter_attach-partition-from} - -``` sql -ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1 -``` - -Cette requête copie la partition de données du `table1` de `table2` ajoute des données de gratuit dans la `table2`. 
Notez que les données ne seront pas supprimées de `table1`. - -Pour que la requête s'exécute correctement, les conditions suivantes doivent être remplies: - -- Les deux tables doivent avoir la même structure. -- Les deux tables doivent avoir la même clé de partition. - -#### REPLACE PARTITION {#alter_replace-partition} - -``` sql -ALTER TABLE table2 REPLACE PARTITION partition_expr FROM table1 -``` - -Cette requête copie la partition de données du `table1` de `table2` et remplace la partition existante dans le `table2`. Notez que les données ne seront pas supprimées de `table1`. - -Pour que la requête s'exécute correctement, les conditions suivantes doivent être remplies: - -- Les deux tables doivent avoir la même structure. -- Les deux tables doivent avoir la même clé de partition. - -#### MOVE PARTITION TO TABLE {#alter_move_to_table-partition} - -``` sql -ALTER TABLE table_source MOVE PARTITION partition_expr TO TABLE table_dest -``` - -Cette requête déplace la partition de données du `table_source` de `table_dest` avec la suppression des données de `table_source`. - -Pour que la requête s'exécute correctement, les conditions suivantes doivent être remplies: - -- Les deux tables doivent avoir la même structure. -- Les deux tables doivent avoir la même clé de partition. -- Les deux tables doivent appartenir à la même famille de moteurs. (répliqué ou non répliqué) -- Les deux tables doivent avoir la même stratégie de stockage. - -#### CLEAR COLUMN IN PARTITION {#alter_clear-column-partition} - -``` sql -ALTER TABLE table_name CLEAR COLUMN column_name IN PARTITION partition_expr -``` - -Réinitialise toutes les valeurs de la colonne spécifiée dans une partition. Si l' `DEFAULT` la clause a été déterminée lors de la création d'une table, cette requête définit la valeur de la colonne à une valeur par défaut spécifiée. - -Exemple: - -``` sql -ALTER TABLE visits CLEAR COLUMN hour in PARTITION 201902 -``` - -#### FREEZE PARTITION {#alter_freeze-partition} - -``` sql -ALTER TABLE table_name FREEZE [PARTITION partition_expr] -``` - -Cette requête crée une sauvegarde locale d'une partition spécifiée. Si l' `PARTITION` la clause est omise, la requête crée la sauvegarde de toutes les partitions à la fois. - -!!! note "Note" - L'ensemble du processus de sauvegarde est effectuée sans arrêter le serveur. - -Notez que pour les tables de style ancien, vous pouvez spécifier le préfixe du nom de la partition (par exemple, ‘2019’)- ensuite, la requête crée la sauvegarde pour toutes les partitions correspondantes. Lisez à propos de la définition de l'expression de partition dans une section [Comment spécifier l'expression de partition](#alter-how-to-specify-part-expr). - -Au moment de l'exécution, pour un instantané de données, la requête crée des liens rigides vers des données de table. Les liens sont placés dans le répertoire `/var/lib/clickhouse/shadow/N/...`, où: - -- `/var/lib/clickhouse/` est le répertoire de travail clickhouse spécifié dans la configuration. -- `N` est le numéro incrémental de la sauvegarde. - -!!! note "Note" - Si vous utilisez [un ensemble de disques pour le stockage des données dans une table](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes), le `shadow/N` le répertoire apparaît sur chaque disque, stockant les parties de données correspondant `PARTITION` expression. - -La même structure de répertoires est créée à l'intérieur de la sauvegarde qu'à l'intérieur `/var/lib/clickhouse/`. 
La requête effectue ‘chmod’ pour tous les fichiers, interdisant d'écrire en eux. - -Après avoir créé la sauvegarde, vous pouvez copier les données depuis `/var/lib/clickhouse/shadow/` sur le serveur distant, puis supprimez-le du serveur local. Notez que l' `ALTER t FREEZE PARTITION` la requête n'est pas répliqué. Il crée une sauvegarde locale uniquement sur le serveur local. - -La requête crée une sauvegarde presque instantanément (mais elle attend d'abord que les requêtes en cours à la table correspondante se terminent). - -`ALTER TABLE t FREEZE PARTITION` copie uniquement les données, pas les métadonnées de la table. Faire une sauvegarde des métadonnées de la table, copiez le fichier `/var/lib/clickhouse/metadata/database/table.sql` - -Pour restaurer des données à partir d'une sauvegarde, procédez comme suit: - -1. Créer la table si elle n'existe pas. Pour afficher la requête, utilisez la .fichier sql (remplacer `ATTACH` avec `CREATE`). -2. Copier les données de la `data/database/table/` répertoire à l'intérieur de la sauvegarde `/var/lib/clickhouse/data/database/table/detached/` répertoire. -3. Exécuter `ALTER TABLE t ATTACH PARTITION` les requêtes pour ajouter les données à une table. - -La restauration à partir d'une sauvegarde ne nécessite pas l'arrêt du serveur. - -Pour plus d'informations sur les sauvegardes et la restauration [La Sauvegarde Des Données](../../operations/backup.md) section. - -#### CLEAR INDEX IN PARTITION {#alter_clear-index-partition} - -``` sql -ALTER TABLE table_name CLEAR INDEX index_name IN PARTITION partition_expr -``` - -La requête fonctionne de manière similaire à `CLEAR COLUMN` mais il remet un index au lieu d'une colonne de données. - -#### FETCH PARTITION {#alter_fetch-partition} - -``` sql -ALTER TABLE table_name FETCH PARTITION partition_expr FROM 'path-in-zookeeper' -``` - -Télécharge une partition depuis un autre serveur. Cette requête ne fonctionne que pour les tables répliquées. - -La requête effectue les opérations suivantes: - -1. Télécharge la partition à partir du fragment spécifié. Dans ‘path-in-zookeeper’ vous devez spécifier un chemin vers le fragment dans ZooKeeper. -2. Ensuite, la requête met les données téléchargées dans le `detached` répertoire de la `table_name` table. L'utilisation de la [ATTACH PARTITION\|PART](#alter_attach-partition) requête pour ajouter les données à la table. - -Exemple: - -``` sql -ALTER TABLE users FETCH PARTITION 201902 FROM '/clickhouse/tables/01-01/visits'; -ALTER TABLE users ATTACH PARTITION 201902; -``` - -Notez que: - -- Le `ALTER ... FETCH PARTITION` la requête n'est pas répliqué. Il place la partition à la `detached` répertoire sur le serveur local. -- Le `ALTER TABLE ... ATTACH` la requête est répliquée. Il ajoute les données à toutes les répliques. Les données sont ajoutées à l'une des répliques `detached` répertoire, et aux autres-des répliques voisines. - -Avant le téléchargement, le système vérifie si la partition existe et si la structure de la table correspond. La réplique la plus appropriée est sélectionnée automatiquement parmi les répliques saines. - -Bien que la requête soit appelée `ALTER TABLE`, il ne modifie pas la structure de la table et ne modifie pas immédiatement les données disponibles dans la table. - -#### MOVE PARTITION\|PART {#alter_move-partition} - -Déplace des partitions ou des parties de données vers un autre volume ou disque pour `MergeTree`-tables de moteur. 
Voir [Utilisation de plusieurs périphériques de bloc pour le stockage de données](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes). - -``` sql -ALTER TABLE table_name MOVE PARTITION|PART partition_expr TO DISK|VOLUME 'disk_name' -``` - -Le `ALTER TABLE t MOVE` requête: - -- Non répliqué, car différentes répliques peuvent avoir des stratégies de stockage différentes. -- Renvoie une erreur si le disque ou le volume n'est pas configuré. Query renvoie également une erreur si les conditions de déplacement des données, spécifiées dans la stratégie de stockage, ne peuvent pas être appliquées. -- Peut renvoyer une erreur dans le cas, lorsque les données à déplacer sont déjà déplacées par un processus en arrière-plan, simultané `ALTER TABLE t MOVE` requête ou à la suite de la fusion de données d'arrière-plan. Un utilisateur ne doit effectuer aucune action supplémentaire dans ce cas. - -Exemple: - -``` sql -ALTER TABLE hits MOVE PART '20190301_14343_16206_438' TO VOLUME 'slow' -ALTER TABLE hits MOVE PARTITION '2019-09-01' TO DISK 'fast_ssd' -``` - -#### Comment définir L'Expression de la Partition {#alter-how-to-specify-part-expr} - -Vous pouvez spécifier l'expression de partition dans `ALTER ... PARTITION` requêtes de différentes manières: - -- Comme une valeur de l' `partition` la colonne de la `system.parts` table. Exemple, `ALTER TABLE visits DETACH PARTITION 201901`. -- Comme expression de la colonne de la table. Les constantes et les expressions constantes sont prises en charge. Exemple, `ALTER TABLE visits DETACH PARTITION toYYYYMM(toDate('2019-01-25'))`. -- À l'aide de l'ID de partition. Partition ID est un identifiant de chaîne de la partition (lisible par l'homme, si possible) qui est utilisé comme noms de partitions dans le système de fichiers et dans ZooKeeper. L'ID de partition doit être spécifié dans `PARTITION ID` clause, entre guillemets simples. Exemple, `ALTER TABLE visits DETACH PARTITION ID '201901'`. -- Dans le [ALTER ATTACH PART](#alter_attach-partition) et [DROP DETACHED PART](#alter_drop-detached) requête, pour spécifier le nom d'une partie, utilisez le littéral de chaîne avec une valeur de `name` la colonne de la [système.detached_parts](../../operations/system-tables.md#system_tables-detached_parts) table. Exemple, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`. - -L'utilisation de guillemets lors de la spécification de la partition dépend du type d'expression de partition. Par exemple, pour la `String` type, vous devez spécifier son nom entre guillemets (`'`). Pour l' `Date` et `Int*` types aucune citation n'est nécessaire. - -Pour les tables de style ancien, vous pouvez spécifier la partition sous forme de nombre `201901` ou une chaîne de caractères `'201901'`. La syntaxe des tables new-style est plus stricte avec les types (similaire à l'analyseur pour le format D'entrée des valeurs). - -Toutes les règles ci-dessus sont aussi valables pour la [OPTIMIZE](misc.md#misc_operations-optimize) requête. Si vous devez spécifier la seule partition lors de l'optimisation d'une table non partitionnée, définissez l'expression `PARTITION tuple()`. Exemple: - -``` sql -OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL; -``` - -Les exemples de `ALTER ... 
PARTITION` les requêtes sont démontrées dans les tests [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) et [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql). - -### Manipulations avec Table TTL {#manipulations-with-table-ttl} - -Vous pouvez modifier [tableau TTL](../../engines/table-engines/mergetree-family/mergetree.md#mergetree-table-ttl) avec une demande du formulaire suivant: - -``` sql -ALTER TABLE table-name MODIFY TTL ttl-expression -``` - -### Synchronicité des requêtes ALTER {#synchronicity-of-alter-queries} - -Pour les tables non réplicables, tous `ALTER` les requêtes sont exécutées simultanément. Pour les tables réplicables, la requête ajoute simplement des instructions pour les actions appropriées à `ZooKeeper` et les actions elles-mêmes sont effectuées dès que possible. Cependant, la requête peut attendre que ces actions soient terminées sur tous les réplicas. - -Pour `ALTER ... ATTACH|DETACH|DROP` les requêtes, vous pouvez utiliser le `replication_alter_partitions_sync` configuration pour configurer l'attente. -Valeurs possibles: `0` – do not wait; `1` – only wait for own execution (default); `2` – wait for all. - -### Mutation {#alter-mutations} - -Les Mutations sont une variante ALTER query qui permet de modifier ou de supprimer des lignes dans une table. Contrairement à la norme `UPDATE` et `DELETE` les requêtes qui sont destinées aux changements de données de point, les mutations sont destinées aux opérations lourdes qui modifient beaucoup de lignes dans une table. Pris en charge pour le `MergeTree` famille de moteurs de table, y compris les moteurs avec support de réplication. - -Les tables existantes sont prêtes pour les mutations telles quelles (aucune conversion nécessaire), mais après l'application de la première mutation à une table, son format de métadonnées devient incompatible avec les versions précédentes du serveur et il devient impossible de revenir à une version précédente. - -Commandes actuellement disponibles: - -``` sql -ALTER TABLE [db.]table DELETE WHERE filter_expr -``` - -Le `filter_expr` doit être de type `UInt8`. La requête supprime les lignes de la table pour lesquelles cette expression prend une valeur différente de zéro. - -``` sql -ALTER TABLE [db.]table UPDATE column1 = expr1 [, ...] WHERE filter_expr -``` - -Le `filter_expr` doit être de type `UInt8`. Cette requête met à jour les valeurs des colonnes spécifiées en les valeurs des expressions correspondantes dans les lignes pour lesquelles `filter_expr` prend une valeur non nulle. Les valeurs sont converties en type de colonne à l'aide `CAST` opérateur. La mise à jour des colonnes utilisées dans le calcul de la clé primaire ou de la clé de partition n'est pas prise en charge. - -``` sql -ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name -``` - -La requête reconstruit l'index secondaire `name` dans la partition `partition_name`. - -Une requête peut contenir plusieurs commandes séparées par des virgules. - -Pour les tables \* MergeTree, les mutations s'exécutent en réécrivant des parties de données entières. 
-
-For \*MergeTree tables, mutations execute by rewriting whole data parts. There is no atomicity — parts are substituted for mutated parts as soon as they are ready, and a `SELECT` query that started executing during a mutation will see data from parts that have already been mutated along with data from parts that have not been mutated yet.
-
-Mutations are totally ordered by their creation order and are applied to each part in that order. Mutations are also partially ordered with INSERTs — data that was inserted into the table before the mutation was submitted will be mutated, and data that was inserted after that will not be mutated. Note that mutations do not block INSERTs in any way.
-
-A mutation query returns immediately after the mutation entry is added (to ZooKeeper for replicated tables, to the filesystem for non-replicated tables). The mutation itself executes asynchronously using the system profile settings. To track the progress of mutations you can use the [`system.mutations`](../../operations/system-tables.md#system_tables-mutations) table. A mutation that was successfully submitted will continue to execute even if ClickHouse servers are restarted. There is no way to roll back a mutation once it is submitted, but if the mutation is stuck for some reason it can be cancelled with the [`KILL MUTATION`](misc.md#kill-mutation) query.
-
-Entries for finished mutations are not deleted right away (the number of preserved entries is determined by the `finished_mutations_to_keep` storage engine parameter). Older mutation entries are deleted.
-
-## ALTER USER {#alter-user-statement}
-
-Changes ClickHouse user accounts.
-
-### Syntax {#alter-user-syntax}
-
-``` sql
-ALTER USER [IF EXISTS] name [ON CLUSTER cluster_name]
-    [RENAME TO new_name]
-    [IDENTIFIED [WITH {PLAINTEXT_PASSWORD|SHA256_PASSWORD|DOUBLE_SHA1_PASSWORD}] BY {'password'|'hash'}]
-    [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
-    [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ]
-    [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
-```
-
-### Description {#alter-user-dscr}
-
-To use `ALTER USER` you must have the [ALTER USER](grant.md#grant-access-management) privilege.
-
-### Examples {#alter-user-examples}
-
-Set assigned roles as default:
-
-``` sql
-ALTER USER user DEFAULT ROLE role1, role2
-```
-
-If roles aren't previously assigned to a user, ClickHouse throws an exception.
-
-Set all the assigned roles as default:
-
-``` sql
-ALTER USER user DEFAULT ROLE ALL
-```
-
-If a role is assigned to the user in the future, it will become default automatically.
-
-Set all the assigned roles as default, excepting `role1` and `role2`:
-
-``` sql
-ALTER USER user DEFAULT ROLE ALL EXCEPT role1, role2
-```
-
-## ALTER ROLE {#alter-role-statement}
-
-Changes roles.
-
-### Syntax {#alter-role-syntax}
-
-``` sql
-ALTER ROLE [IF EXISTS] name [ON CLUSTER cluster_name]
-    [RENAME TO new_name]
-    [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
-```
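-
-A hedged sketch of this syntax in use; the role name and setting here are purely illustrative:
-
-``` sql
-ALTER ROLE accountant RENAME TO accountant_ro SETTINGS readonly = 1  -- hypothetical role
-```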
-
-## ALTER ROW POLICY {#alter-row-policy-statement}
-
-Changes the row policy.
-
-### Syntax {#alter-row-policy-syntax}
-
-``` sql
-ALTER [ROW] POLICY [IF EXISTS] name [ON CLUSTER cluster_name] ON [database.]table
-    [RENAME TO new_name]
-    [AS {PERMISSIVE | RESTRICTIVE}]
-    [FOR SELECT]
-    [USING {condition | NONE}][,...]
-    [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
-```
-
-## ALTER QUOTA {#alter-quota-statement}
-
-Changes quotas.
-
-### Syntax {#alter-quota-syntax}
-
-``` sql
-ALTER QUOTA [IF EXISTS] name [ON CLUSTER cluster_name]
-    [RENAME TO new_name]
-    [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}]
-    [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY | WEEK | MONTH | QUARTER | YEAR}
-        {MAX { {QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number } [,...] |
-        NO LIMITS | TRACKING ONLY} [,...]]
-    [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
-```
-
-## ALTER SETTINGS PROFILE {#alter-settings-profile-statement}
-
-Changes settings profiles.
-
-### Syntax {#alter-settings-profile-syntax}
-
-``` sql
-ALTER SETTINGS PROFILE [IF EXISTS] name [ON CLUSTER cluster_name]
-    [RENAME TO new_name]
-    [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | INHERIT 'profile_name'] [,...]
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/alter/)
diff --git a/docs/fr/sql-reference/statements/create.md b/docs/fr/sql-reference/statements/create.md
deleted file mode 100644
index e7c8040ee6e..00000000000
--- a/docs/fr/sql-reference/statements/create.md
+++ /dev/null
@@ -1,502 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 35
-toc_title: CREATE
----
-
-# CREATE Queries {#create-queries}
-
-## CREATE DATABASE {#query-language-create-database}
-
-Creates a database.
-
-``` sql
-CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] [ENGINE = engine(...)]
-```
-
-### Clauses {#clauses}
-
-- `IF NOT EXISTS` — If the `db_name` database already exists, ClickHouse doesn't create a new database and:
-
-    - Doesn't throw an exception if the clause is specified.
-    - Throws an exception if the clause isn't specified.
-
-- `ON CLUSTER` — ClickHouse creates the `db_name` database on all the servers of a specified cluster.
-
-- `ENGINE`
-
-    - [MySQL](../../engines/database-engines/mysql.md) — Allows you to retrieve data from a remote MySQL server.
-      By default, ClickHouse uses its own [database engine](../../engines/database-engines/index.md).
-
-## CREATE TABLE {#create-table-query}
-
-The `CREATE TABLE` query can have several forms.
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [compression_codec] [TTL expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [compression_codec] [TTL expr2],
-    ...
-) ENGINE = engine
-```
-
-Creates a table named ‘name’ in the ‘db’ database, or the current database if ‘db’ is not set, with the structure specified in brackets and the ‘engine’ engine.
-The structure of the table is a list of column descriptions. If indexes are supported by the engine, they are indicated as parameters for the table engine.
-
-A column description is `name type` in the simplest case. Example: `RegionID UInt32`.
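-
-For instance, a minimal sketch that puts these pieces together; the table, columns, and engine below are purely illustrative:
-
-``` sql
-CREATE TABLE IF NOT EXISTS db.visits  -- hypothetical database and table
-(
-    EventDate Date,
-    UserID UInt32,
-    URL String
-) ENGINE = MergeTree()
-ORDER BY (EventDate, UserID)
-```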
-
-Expressions can also be defined for the default values (see below).
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name AS [db2.]name2 [ENGINE = engine]
-```
-
-Creates a table with the same structure as another table. You can specify a different engine for the table. If the engine is not specified, the same engine will be used as for the `db2.name2` table.
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name AS table_function()
-```
-
-Creates a table with the structure and data returned by a [table function](../table-functions/index.md#table-functions).
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name ENGINE = engine AS SELECT ...
-```
-
-Creates a table with a structure like the result of the `SELECT` query, with the ‘engine’ engine, and fills it with data from SELECT.
-
-In all cases, if `IF NOT EXISTS` is specified, the query doesn't return an error if the table already exists. In this case, the query won't do anything.
-
-There can be other clauses after the `ENGINE` clause in the query. See the detailed documentation on how to create tables in the descriptions of [table engines](../../engines/table-engines/index.md#table_engines).
-
-### Default Values {#create-default-values}
-
-The column description can specify an expression for a default value, in one of the following ways: `DEFAULT expr`, `MATERIALIZED expr`, `ALIAS expr`.
-Example: `URLDomain String DEFAULT domain(URL)`.
-
-If an expression for the default value is not defined, the default values will be set to zeros for numbers, empty strings for strings, empty arrays for arrays, and `1970-01-01` for dates or zero unix timestamp for dates with time. NULLs are not supported.
-
-If the default expression is defined, the column type is optional. If there isn't an explicitly defined type, the default expression type is used. Example: `EventDate DEFAULT toDate(EventTime)` – the ‘Date’ type will be used for the ‘EventDate’ column.
-
-If the data type and default expression are defined explicitly, this expression will be cast to the specified type using type casting functions. Example: `Hits UInt32 DEFAULT 0` means the same thing as `Hits UInt32 DEFAULT toUInt32(0)`.
-
-Default expressions may be defined as an arbitrary expression from table constants and columns. When creating and changing the table structure, it checks that expressions don't contain loops. For INSERT, it checks that expressions are resolvable – that all columns they can be calculated from have been passed.
-
-`DEFAULT expr`
-
-Normal default value. If the INSERT query doesn't specify the corresponding column, it will be filled in by computing the corresponding expression.
-
-`MATERIALIZED expr`
-
-Materialized expression. Such a column can't be specified for INSERT, because it is always calculated.
-For an INSERT without a list of columns, these columns are not considered.
-In addition, this column is not substituted when using an asterisk in a SELECT query. This is to preserve the invariant that the dump obtained using `SELECT *` can be inserted back into the table using INSERT without specifying the list of columns.
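-
-A hedged sketch of both kinds of column in one table; the table layout is hypothetical:
-
-``` sql
-CREATE TABLE hits_sketch  -- hypothetical table
-(
-    URL String,
-    URLDomain String DEFAULT domain(URL),           -- filled in when omitted from INSERT
-    EventTime DateTime,
-    EventDate Date MATERIALIZED toDate(EventTime)   -- always computed, never inserted
-) ENGINE = MergeTree()
-ORDER BY EventTime
-```
-
-With such a table, `SELECT *` would return `URL`, `URLDomain`, and `EventTime` only, while `SELECT EventDate FROM hits_sketch` still works.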
-
-`ALIAS expr`
-
-Synonym. Such a column isn't stored in the table at all.
-Its values can't be inserted into the table, and it is not substituted when using an asterisk in a SELECT query.
-It can be used in SELECTs if the alias is expanded during query parsing.
-
-When using the ALTER query to add new columns, old data for these columns is not written. Instead, when reading old data that does not have values for the new columns, expressions are computed on the fly by default. However, if running the expressions requires different columns that are not indicated in the query, these columns will additionally be read, but only for the blocks of data that need it.
-
-If you add a new column to a table but later change its default expression, the values used for old data will change (for data where values were not stored on disk). Note that when running background merges, data for columns that are missing in one of the merging parts is written to the merged part.
-
-It is not possible to set default values for elements in nested data structures.
-
-### Constraints {#constraints}
-
-Along with column descriptions, constraints can be defined:
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [compression_codec] [TTL expr1],
-    ...
-    CONSTRAINT constraint_name_1 CHECK boolean_expr_1,
-    ...
-) ENGINE = engine
-```
-
-`boolean_expr_1` can be any boolean expression. If constraints are defined for the table, each of them will be checked for every row of an `INSERT` query. If any constraint is not satisfied — the server will raise an exception with the constraint name and the checking expression.
-
-Adding a large amount of constraints can negatively affect the performance of big `INSERT` queries.
-
-### TTL Expression {#ttl-expression}
-
-Defines the storage time for values. Can be specified only for MergeTree-family tables. For a detailed description, see [TTL for columns and tables](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
-
-### Column Compression Codecs {#codecs}
-
-By default, ClickHouse applies the `lz4` compression method. For the `MergeTree` engine family you can change the default compression method in the [compression](../../operations/server-configuration-parameters/settings.md#server-settings-compression) section of a server configuration. You can also define a compression method for each individual column in the `CREATE TABLE` query.
-
-``` sql
-CREATE TABLE codec_example
-(
-    dt Date CODEC(ZSTD),
-    ts DateTime CODEC(LZ4HC),
-    float_value Float32 CODEC(NONE),
-    double_value Float64 CODEC(LZ4HC(9)),
-    value Float32 CODEC(Delta, ZSTD)
-)
-ENGINE =
-...
-```
-
-If a codec is specified, the default codec doesn't apply. Codecs can be combined in a pipeline, for example, `CODEC(Delta, ZSTD)`. To select the best codec combination for your project, pass benchmarks similar to those described in the Altinity [New Encodings to Improve ClickHouse Efficiency](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse) article.
-
-!!!
warning "Avertissement" - Vous ne pouvez pas décompresser les fichiers de base de données ClickHouse avec des utilitaires externes tels que `lz4`. Au lieu de cela, utilisez le spécial [clickhouse-compresseur](https://github.com/ClickHouse/ClickHouse/tree/master/programs/compressor) utilitaire. - -La Compression est prise en charge pour les moteurs de tableau suivants: - -- [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) famille. Prend en charge les codecs de compression de colonne et la sélection de la méthode de compression par défaut par [compression](../../operations/server-configuration-parameters/settings.md#server-settings-compression) paramètre. -- [Journal](../../engines/table-engines/log-family/index.md) famille. Utilise le `lz4` méthode de compression par défaut et prend en charge les codecs de compression de colonne. -- [Définir](../../engines/table-engines/special/set.md). Uniquement pris en charge la compression par défaut. -- [Rejoindre](../../engines/table-engines/special/join.md). Uniquement pris en charge la compression par défaut. - -ClickHouse prend en charge les codecs à usage commun et les codecs spécialisés. - -#### Codecs Spécialisés {#create-query-specialized-codecs} - -Ces codecs sont conçus pour rendre la compression plus efficace en utilisant des fonctionnalités spécifiques des données. Certains de ces codecs ne compressent pas les données eux-mêmes. Au lieu de cela, ils préparent les données pour un codec à usage commun, qui les compresse mieux que sans cette préparation. - -Spécialisé codecs: - -- `Delta(delta_bytes)` — Compression approach in which raw values are replaced by the difference of two neighboring values, except for the first value that stays unchanged. Up to `delta_bytes` sont utilisés pour stocker des valeurs delta, donc `delta_bytes` est la taille maximale des valeurs brutes. Possible `delta_bytes` valeurs: 1, 2, 4, 8. La valeur par défaut pour `delta_bytes` être `sizeof(type)` si égale à 1, 2, 4 ou 8. Dans tous les autres cas, c'est 1. -- `DoubleDelta` — Calculates delta of deltas and writes it in compact binary form. Optimal compression rates are achieved for monotonic sequences with a constant stride, such as time series data. Can be used with any fixed-width type. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. Uses 1 extra bit for 32-byte deltas: 5-bit prefixes instead of 4-bit prefixes. For additional information, see Compressing Time Stamps in [Gorilla: Une Base De Données De Séries Chronologiques Rapide, Évolutive Et En Mémoire](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf). -- `Gorilla` — Calculates XOR between current and previous value and writes it in compact binary form. Efficient when storing a series of floating point values that change slowly, because the best compression rate is achieved when neighboring values are binary equal. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. For additional information, see Compressing Values in [Gorilla: Une Base De Données De Séries Chronologiques Rapide, Évolutive Et En Mémoire](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf). -- `T64` — Compression approach that crops unused high bits of values in integer data types (including `Enum`, `Date` et `DateTime`). À chaque étape de son algorithme, le codec prend un bloc de 64 valeurs, les place dans une matrice de 64x64 bits, le transpose, recadre les bits de valeurs inutilisés et renvoie le reste sous forme de séquence. 
-
-The `DoubleDelta` and `Gorilla` codecs are used in Gorilla TSDB as the components of its compression algorithm. The Gorilla approach is effective in scenarios where there is a sequence of slowly changing values with their timestamps. Timestamps are effectively compressed by the `DoubleDelta` codec, and values are effectively compressed by the `Gorilla` codec. For example, to get an effectively stored table, you can create it in the following configuration:
-
-``` sql
-CREATE TABLE codec_example
-(
-    timestamp DateTime CODEC(DoubleDelta),
-    slow_values Float32 CODEC(Gorilla)
-)
-ENGINE = MergeTree()
-```
-
-#### General-Purpose Codecs {#create-query-general-purpose-codecs}
-
-Codecs:
-
-- `NONE` — No compression.
-- `LZ4` — Lossless [data compression algorithm](https://github.com/lz4/lz4) used by default. Applies LZ4 fast compression.
-- `LZ4HC[(level)]` — LZ4 HC (high compression) algorithm with configurable level. Default level: 9. Setting `level <= 0` applies the default level. Possible levels: \[1, 12\]. Recommended level range: \[4, 9\].
-- `ZSTD[(level)]` — [ZSTD compression algorithm](https://en.wikipedia.org/wiki/Zstandard) with configurable `level`. Possible levels: \[1, 22\]. Default value: 1.
-
-High compression levels are useful for asymmetric scenarios, like compress once, decompress repeatedly. Higher levels mean better compression and higher CPU usage.
-
-## Temporary Tables {#temporary-tables}
-
-ClickHouse supports temporary tables which have the following characteristics:
-
-- Temporary tables disappear when the session ends, including if the connection is lost.
-- A temporary table uses the Memory engine only.
-- The DB can't be specified for a temporary table. It is created outside of databases.
-- It's impossible to create a temporary table with a distributed DDL query on all cluster servers (by using `ON CLUSTER`): this table exists only in the current session.
-- If a temporary table has the same name as another one and a query specifies the table name without specifying the DB, the temporary table will be used.
-- For distributed query processing, temporary tables used in a query are passed to remote servers.
-
-To create a temporary table, use the following syntax:
-
-``` sql
-CREATE TEMPORARY TABLE [IF NOT EXISTS] table_name
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-)
-```
-
-In most cases, temporary tables are not created manually, but when using external data for a query, or for distributed `(GLOBAL) IN`. For more information, see the appropriate sections.
-
-It's possible to use tables with [ENGINE = Memory](../../engines/table-engines/special/memory.md) instead of temporary tables.
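-
-A hedged sketch of a manual session-scoped workflow; the table and column names are hypothetical:
-
-``` sql
-CREATE TEMPORARY TABLE recent_ids (id UInt64);        -- lives only in this session
-INSERT INTO recent_ids VALUES (1), (2), (3);
-SELECT count() FROM visits WHERE UserID IN (SELECT id FROM recent_ids);
-```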
-
-## Distributed DDL Queries (ON CLUSTER Clause) {#distributed-ddl-queries-on-cluster-clause}
-
-The `CREATE`, `DROP`, `ALTER`, and `RENAME` queries support distributed execution on a cluster.
-For example, the following query creates the `all_hits` `Distributed` table on each host in `cluster`:
-
-``` sql
-CREATE TABLE IF NOT EXISTS all_hits ON CLUSTER cluster (p Date, i Int32) ENGINE = Distributed(cluster, default, hits)
-```
-
-In order to run these queries correctly, each host must have the same cluster definition (to simplify syncing configs, you can use substitutions from ZooKeeper). They must also connect to the ZooKeeper servers.
-The local version of the query will eventually be executed on each host in the cluster, even if some hosts are currently not available. The order for executing queries within a single host is guaranteed.
-
-## CREATE VIEW {#create-view}
-
-``` sql
-CREATE [MATERIALIZED] VIEW [IF NOT EXISTS] [db.]table_name [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ...
-```
-
-Creates a view. There are two types of views: normal and materialized.
-
-Normal views don't store any data, but just perform a read from another table. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the FROM clause.
-
-As an example, assume you've created a view:
-
-``` sql
-CREATE VIEW view AS SELECT ...
-```
-
-and written a query:
-
-``` sql
-SELECT a, b, c FROM view
-```
-
-This query is fully equivalent to using the subquery:
-
-``` sql
-SELECT a, b, c FROM (SELECT ...)
-```
-
-Materialized views store data transformed by the corresponding SELECT query.
-
-When creating a materialized view without `TO [db].[table]`, you must specify ENGINE – the table engine for storing data.
-
-When creating a materialized view with `TO [db].[table]`, you must not use `POPULATE`.
-
-A materialized view is arranged as follows: when inserting data into the table specified in SELECT, part of the inserted data is converted by this SELECT query, and the result is inserted into the view.
-
-If you specify POPULATE, the existing table data is inserted into the view when creating it, as if making a `CREATE TABLE ... AS SELECT ...`. Otherwise, the query contains only the data inserted into the table after creating the view. We don't recommend using POPULATE, since data inserted into the table during the view creation will not be inserted into it.
-
-A `SELECT` query can contain `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`… Note that the corresponding conversions are performed independently on each block of inserted data. For example, if `GROUP BY` is set, data is aggregated during insertion, but only within a single packet of inserted data. The data won't be further aggregated. The exception is when using an ENGINE that independently performs data aggregation, such as `SummingMergeTree`.
-
-The execution of `ALTER` queries on materialized views has not been fully developed, so they might be inconvenient. If the materialized view uses the construction `TO [db.]name`, you can `DETACH` the view, run `ALTER` for the target table, and then `ATTACH` the previously detached (`DETACH`) view.
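-
-Since the `TO [db.]name` form keeps the transformed data in an ordinary target table, a hedged sketch of that arrangement might look as follows; all table and column names are hypothetical:
-
-``` sql
-CREATE TABLE daily_totals (day Date, total UInt64)
-ENGINE = SummingMergeTree() ORDER BY day;
-
-CREATE MATERIALIZED VIEW hits_per_day TO daily_totals AS
-SELECT toDate(EventTime) AS day, count() AS total
-FROM hits
-GROUP BY day;
-```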
-
-Views look the same as normal tables. For example, they are listed in the result of the `SHOW TABLES` query.
-
-There isn't a separate query for deleting views. To delete a view, use `DROP TABLE`.
-
-## CREATE DICTIONARY {#create-dictionary-query}
-
-``` sql
-CREATE DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster]
-(
-    key1 type1 [DEFAULT|EXPRESSION expr1] [HIERARCHICAL|INJECTIVE|IS_OBJECT_ID],
-    key2 type2 [DEFAULT|EXPRESSION expr2] [HIERARCHICAL|INJECTIVE|IS_OBJECT_ID],
-    attr1 type2 [DEFAULT|EXPRESSION expr3],
-    attr2 type2 [DEFAULT|EXPRESSION expr4]
-)
-PRIMARY KEY key1, key2
-SOURCE(SOURCE_NAME([param1 value1 ... paramN valueN]))
-LAYOUT(LAYOUT_NAME([param_name param_value]))
-LIFETIME({MIN min_val MAX max_val | max_val})
-```
-
-Creates an [external dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) with the given [structure](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [layout](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) and [lifetime](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
-
-An external dictionary structure consists of attributes. Dictionary attributes are specified similarly to table columns. The only required attribute property is its type; all other properties may have default values.
-
-Depending on the dictionary [layout](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md), one or more attributes can be specified as dictionary keys.
-
-For more information, see the [External Dictionaries](../dictionaries/external-dictionaries/external-dicts.md) section.
-
-## CREATE USER {#create-user-statement}
-
-Creates a [user account](../../operations/access-rights.md#user-account-management).
-
-### Syntax {#create-user-syntax}
-
-``` sql
-CREATE USER [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]
-    [IDENTIFIED [WITH {NO_PASSWORD|PLAINTEXT_PASSWORD|SHA256_PASSWORD|SHA256_HASH|DOUBLE_SHA1_PASSWORD|DOUBLE_SHA1_HASH}] BY {'password'|'hash'}]
-    [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
-    [DEFAULT ROLE role [,...]]
-    [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
-```
-
-#### Identification {#identification}
-
-There are multiple ways of user identification:
-
-- `IDENTIFIED WITH no_password`
-- `IDENTIFIED WITH plaintext_password BY 'qwerty'`
-- `IDENTIFIED WITH sha256_password BY 'qwerty'` or `IDENTIFIED BY 'password'`
-- `IDENTIFIED WITH sha256_hash BY 'hash'`
-- `IDENTIFIED WITH double_sha1_password BY 'qwerty'`
-- `IDENTIFIED WITH double_sha1_hash BY 'hash'`
-
-#### User Host {#user-host}
-
-A user host is a host from which a connection to the ClickHouse server could be established. The host can be specified in the `HOST` query section in the following ways:
-
-- `HOST IP 'ip_address_or_subnetwork'` — User can connect to the ClickHouse server only from the specified IP address or a [subnetwork](https://en.wikipedia.org/wiki/Subnetwork). Examples: `HOST IP '192.168.0.0/16'`, `HOST IP '2001:DB8::/32'`. For use in production, only specify `HOST IP` elements (IP addresses and their masks), since using `host` and `host_regexp` might cause extra latency.
-- `HOST ANY` — User can connect from any location. This is the default option.
-- `HOST LOCAL` — User can connect only locally.
-- `HOST NAME 'fqdn'` — User host can be specified as FQDN. For example, `HOST NAME 'mysite.com'`.
-- `HOST NAME REGEXP 'regexp'` — You can use [pcre](http://www.pcre.org/) regular expressions when specifying user hosts. For example, `HOST NAME REGEXP '.*\.mysite\.com'`.
-- `HOST LIKE 'template'` — Allows you to use the [LIKE](../functions/string-search-functions.md#function-like) operator to filter the user hosts. For example, `HOST LIKE '%'` is equivalent to `HOST ANY`, and `HOST LIKE '%.mysite.com'` filters all the hosts in the `mysite.com` domain.
-
-Another way of specifying host is to use the `@` syntax with the user name. Examples:
-
-- `CREATE USER mira@'127.0.0.1'` — Equivalent to the `HOST IP` syntax.
-- `CREATE USER mira@'localhost'` — Equivalent to the `HOST LOCAL` syntax.
-- `CREATE USER mira@'192.168.%.%'` — Equivalent to the `HOST LIKE` syntax.
-
-!!! info "Warning"
-    ClickHouse treats `user_name@'address'` as a username as a whole. So, technically you can create multiple users with the same `user_name` and different constructions after `@`. We don't recommend doing so.
-
-### Examples {#create-user-examples}
-
-Create the user account `mira` protected by the password `qwerty`:
-
-``` sql
-CREATE USER mira HOST IP '127.0.0.1' IDENTIFIED WITH sha256_password BY 'qwerty'
-```
-
-`mira` should start the client app on the host where the ClickHouse server runs.
-
-Create the user account `john`, assign roles to it and make these roles default:
-
-``` sql
-CREATE USER john DEFAULT ROLE role1, role2
-```
-
-Create the user account `john` and make all his future roles default:
-
-``` sql
-ALTER USER john DEFAULT ROLE ALL
-```
-
-When some role is assigned to `john` in the future, it will become default automatically.
-
-Create the user account `john` and make all his future roles default, excepting `role1` and `role2`:
-
-``` sql
-ALTER USER john DEFAULT ROLE ALL EXCEPT role1, role2
-```
-
-## CREATE ROLE {#create-role-statement}
-
-Creates a [role](../../operations/access-rights.md#role-management).
-
-### Syntax {#create-role-syntax}
-
-``` sql
-CREATE ROLE [IF NOT EXISTS | OR REPLACE] name
-    [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
-```
-
-### Description {#create-role-description}
-
-A role is a set of [privileges](grant.md#grant-privileges). A user assigned a role gets all the privileges of this role.
-
-A user can be assigned multiple roles. Users can apply their assigned roles in arbitrary combinations by the [SET ROLE](misc.md#set-role-statement) statement. The final scope of privileges is a combined set of all the privileges of all the applied roles. If a user has privileges granted directly to its user account, they are also combined with the privileges granted by roles.
-
-A user can have default roles which apply at user login. To set default roles, use the [SET DEFAULT ROLE](misc.md#set-default-role-statement) statement or the [ALTER USER](alter.md#alter-user-statement) statement.
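-
-For example, a hedged sketch of both options; the user and role names are hypothetical:
-
-``` sql
-SET DEFAULT ROLE role1, role2 TO john;    -- dedicated statement
-ALTER USER john DEFAULT ROLE role1, role2; -- via ALTER USER
-```
-
-Both forms make `role1` and `role2` apply automatically at `john`'s login.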
-
-To revoke a role, use the [REVOKE](revoke.md) statement.
-
-To delete a role, use the [DROP ROLE](misc.md#drop-role-statement) statement. The deleted role is automatically revoked from all the users and roles to which it was assigned.
-
-### Examples {#create-role-examples}
-
-``` sql
-CREATE ROLE accountant;
-GRANT SELECT ON db.* TO accountant;
-```
-
-This sequence of queries creates the role `accountant` that has the privilege of reading data from the `db` database.
-
-Assigning the role to the user `mira`:
-
-``` sql
-GRANT accountant TO mira;
-```
-
-After the role is assigned, the user can apply it and execute the allowed queries. For example:
-
-``` sql
-SET ROLE accountant;
-SELECT * FROM db.*;
-```
-
-## CREATE ROW POLICY {#create-row-policy-statement}
-
-Creates a [filter for rows](../../operations/access-rights.md#row-policy-management) which a user can read from a table.
-
-### Syntax {#create-row-policy-syntax}
-
-``` sql
-CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name [ON CLUSTER cluster_name] ON [db.]table
-    [AS {PERMISSIVE | RESTRICTIVE}]
-    [FOR SELECT]
-    [USING condition]
-    [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
-```
-
-#### AS Clause {#create-row-policy-as}
-
-Using this section you can create permissive or restrictive policies.
-
-A permissive policy grants access to rows. Permissive policies which apply to the same table are combined together using the boolean `OR` operator. Policies are permissive by default.
-
-A restrictive policy restricts access to rows. Restrictive policies which apply to the same table are combined together using the boolean `AND` operator.
-
-Restrictive policies apply to rows that passed the permissive filters. If you set restrictive policies but no permissive policies, the user can't get any row from the table.
-
-#### TO Clause {#create-row-policy-to}
-
-In the `TO` section you can provide a mixed list of roles and users, for example, `CREATE ROW POLICY ... TO accountant, john@localhost`.
-
-The keyword `ALL` means all the ClickHouse users, including the current user. The keyword `ALL EXCEPT` allows excluding some users from the all-users list, for example, `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost`
-
-### Examples {#examples}
-
-- `CREATE ROW POLICY filter ON mydb.mytable FOR SELECT USING a<1000 TO accountant, john@localhost`
-- `CREATE ROW POLICY filter ON mydb.mytable FOR SELECT USING a<1000 TO ALL EXCEPT mira`
-
-## CREATE QUOTA {#create-quota-statement}
-
-Creates a [quota](../../operations/access-rights.md#quotas-management) that can be assigned to a user or a role.
-
-### Syntax {#create-quota-syntax}
-
-``` sql
-CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]
-    [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}]
-    [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY | WEEK | MONTH | QUARTER | YEAR}
-        {MAX { {QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number } [,...] |
-        NO LIMITS | TRACKING ONLY} [,...]]
-    [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
-```
-
-### Example {#create-quota-example}
-
-Limit the maximum number of queries for the current user to 123 queries within a 15-month interval:
-
-``` sql
-CREATE QUOTA qA FOR INTERVAL 15 MONTH MAX QUERIES 123 TO CURRENT_USER
-```
-
-## CREATE SETTINGS PROFILE {#create-settings-profile-statement}
-
-Creates a [settings profile](../../operations/access-rights.md#settings-profiles-management) that can be assigned to a user or a role.
-
-### Syntax {#create-settings-profile-syntax}
-
-``` sql
-CREATE SETTINGS PROFILE [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]
-    [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | INHERIT 'profile_name'] [,...]
-```
-
-### Example {#create-settings-profile-example}
-
-Create the `max_memory_usage_profile` settings profile with a value and constraints for the `max_memory_usage` setting. Assign it to `robin`:
-
-``` sql
-CREATE SETTINGS PROFILE max_memory_usage_profile SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000 TO robin
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/create/)
diff --git a/docs/fr/sql-reference/statements/grant.md b/docs/fr/sql-reference/statements/grant.md
deleted file mode 100644
index 143c9a36e33..00000000000
--- a/docs/fr/sql-reference/statements/grant.md
+++ /dev/null
@@ -1,476 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 39
-toc_title: GRANT
----
-
-# GRANT {#grant}
-
-- Grants [privileges](#grant-privileges) to ClickHouse user accounts or roles.
-- Assigns roles to user accounts or to other roles.
-
-To revoke privileges, use the [REVOKE](revoke.md) statement. You can also examine granted privileges with the [SHOW GRANTS](show.md#show-grants-statement) statement.
-
-## Granting Privilege Syntax {#grant-privigele-syntax}
-
-``` sql
-GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.table|db.*|*.*|table|*} TO {user | role | CURRENT_USER} [,...] [WITH GRANT OPTION]
-```
-
-- `privilege` — Type of privilege.
-- `role` — ClickHouse user role.
-- `user` — ClickHouse user account.
-
-The `WITH GRANT OPTION` clause grants `user` or `role` permission to execute the `GRANT` query. Users can grant privileges of the same scope they have or less.
-
-## Assigning Role Syntax {#assign-role-syntax}
-
-``` sql
-GRANT [ON CLUSTER cluster_name] role [,...] TO {user | another_role | CURRENT_USER} [,...] [WITH ADMIN OPTION]
-```
-
-- `role` — ClickHouse user role.
-- `user` — ClickHouse user account.
-
-The `WITH ADMIN OPTION` clause sets the [ADMIN OPTION](#admin-option-privilege) privilege for `user` or `role`.
-
-## Usage {#grant-usage}
-
-To use `GRANT`, your account must have the `GRANT OPTION` privilege. You can grant privileges only inside the scope of your account privileges.
-
-For example, an administrator has granted privileges to the `john` account by the query:
-
-``` sql
-GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION
-```
-
-It means that `john` has permission to execute:
-
-- `SELECT x,y FROM db.table`.
-- `SELECT x FROM db.table`.
-- `SELECT y FROM db.table`.
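-
-To verify what an account ended up with, the grants can be inspected — a hedged sketch, using the account from the example above:
-
-``` sql
-SHOW GRANTS FOR john
-```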
-
-`john` can't execute `SELECT z FROM db.table`. The `SELECT * FROM db.table` is also not available. When processing this query, ClickHouse doesn't return any data, even `x` and `y`. The only exception is if a table contains only the `x` and `y` columns, in which case ClickHouse returns all the data.
-
-Also `john` has the `GRANT OPTION` privilege, so it can grant other users privileges of the same or a smaller scope.
-
-When specifying privileges, you can use an asterisk (`*`) instead of a table or a database name. For example, the `GRANT SELECT ON db.* TO john` query allows `john` to execute the `SELECT` query over all the tables in the `db` database. Also, you can omit the database name. In this case privileges are granted for the current database. For example, `GRANT SELECT ON * TO john` grants the privilege on all the tables in the current database, and `GRANT SELECT ON mytable TO john` grants the privilege on the `mytable` table in the current database.
-
-Access to the `system` database is always allowed (since this database is used for processing queries).
-
-You can grant multiple privileges to multiple accounts in one query. The query `GRANT SELECT, INSERT ON *.* TO john, robin` allows the accounts `john` and `robin` to execute the `INSERT` and `SELECT` queries over all the tables in all the databases on the server.
-
-## Privileges {#grant-privileges}
-
-A privilege is a permission to execute a specific kind of queries.
-
-Privileges have a hierarchical structure. The set of permitted queries depends on the privilege scope.
-
-Hierarchy of privileges:
-
-- [SELECT](#grant-select)
-- [INSERT](#grant-insert)
-- [ALTER](#grant-alter)
-    - `ALTER TABLE`
-        - `ALTER UPDATE`
-        - `ALTER DELETE`
-        - `ALTER COLUMN`
-            - `ALTER ADD COLUMN`
-            - `ALTER DROP COLUMN`
-            - `ALTER MODIFY COLUMN`
-            - `ALTER COMMENT COLUMN`
-            - `ALTER CLEAR COLUMN`
-            - `ALTER RENAME COLUMN`
-        - `ALTER INDEX`
-            - `ALTER ORDER BY`
-            - `ALTER ADD INDEX`
-            - `ALTER DROP INDEX`
-            - `ALTER MATERIALIZE INDEX`
-            - `ALTER CLEAR INDEX`
-        - `ALTER CONSTRAINT`
-            - `ALTER ADD CONSTRAINT`
-            - `ALTER DROP CONSTRAINT`
-        - `ALTER TTL`
-            - `ALTER MATERIALIZE TTL`
-        - `ALTER SETTINGS`
-        - `ALTER MOVE PARTITION`
-        - `ALTER FETCH PARTITION`
-        - `ALTER FREEZE PARTITION`
-    - `ALTER VIEW`
-        - `ALTER VIEW REFRESH`
-        - `ALTER VIEW MODIFY QUERY`
-- [CREATE](#grant-create)
-    - `CREATE DATABASE`
-    - `CREATE TABLE`
-    - `CREATE VIEW`
-    - `CREATE DICTIONARY`
-    - `CREATE TEMPORARY TABLE`
-- [DROP](#grant-drop)
-    - `DROP DATABASE`
-    - `DROP TABLE`
-    - `DROP VIEW`
-    - `DROP DICTIONARY`
-- [TRUNCATE](#grant-truncate)
-- [OPTIMIZE](#grant-optimize)
-- [SHOW](#grant-show)
-    - `SHOW DATABASES`
-    - `SHOW TABLES`
-    - `SHOW COLUMNS`
-    - `SHOW DICTIONARIES`
-- [KILL QUERY](#grant-kill-query)
-- [ACCESS MANAGEMENT](#grant-access-management)
-    - `CREATE USER`
-    - `ALTER USER`
-    - `DROP USER`
-    - `CREATE ROLE`
-    - `ALTER ROLE`
-    - `DROP ROLE`
-    - `CREATE ROW POLICY`
-    - `ALTER ROW POLICY`
-    - `DROP ROW POLICY`
-    - `CREATE QUOTA`
-    - `ALTER QUOTA`
-    - `DROP QUOTA`
-    - `CREATE SETTINGS PROFILE`
-    - `ALTER SETTINGS PROFILE`
-    - `DROP SETTINGS PROFILE`
-    - `SHOW ACCESS`
-        - `SHOW_USERS`
-        - `SHOW_ROLES`
-        - `SHOW_ROW_POLICIES`
-        - `SHOW_QUOTAS`
-        - `SHOW_SETTINGS_PROFILES`
-    - `ROLE ADMIN`
-- [SYSTEM](#grant-system)
-    - `SYSTEM SHUTDOWN`
-    - `SYSTEM DROP CACHE`
-        - `SYSTEM DROP DNS CACHE`
-        - `SYSTEM DROP MARK CACHE`
-        - `SYSTEM DROP UNCOMPRESSED CACHE`
-    - `SYSTEM RELOAD`
-        - `SYSTEM RELOAD CONFIG`
-        - `SYSTEM RELOAD DICTIONARY`
-        - `SYSTEM RELOAD EMBEDDED DICTIONARIES`
-    - `SYSTEM MERGES`
-    - `SYSTEM TTL MERGES`
-    - `SYSTEM FETCHES`
-    - `SYSTEM MOVES`
-    - `SYSTEM SENDS`
-        - `SYSTEM DISTRIBUTED SENDS`
-        - `SYSTEM REPLICATED SENDS`
-    - `SYSTEM REPLICATION QUEUES`
-    - `SYSTEM SYNC REPLICA`
-    - `SYSTEM RESTART REPLICA`
-    - `SYSTEM FLUSH`
-        - `SYSTEM FLUSH DISTRIBUTED`
-        - `SYSTEM FLUSH LOGS`
-- [INTROSPECTION](#grant-introspection)
-    - `addressToLine`
-    - `addressToSymbol`
-    - `demangle`
-- [SOURCES](#grant-sources)
-    - `FILE`
-    - `URL`
-    - `REMOTE`
-    - `MYSQL`
-    - `ODBC`
-    - `JDBC`
-    - `HDFS`
-    - `S3`
-- [dictGet](#grant-dictget)
-
-Examples of how this hierarchy is treated:
-
-- The `ALTER` privilege includes all other `ALTER*` privileges.
-- `ALTER CONSTRAINT` includes the `ALTER ADD CONSTRAINT` and `ALTER DROP CONSTRAINT` privileges.
-
-Privileges are applied at different levels. Knowing a level suggests the syntax available for the privilege.
-
-Levels (from lower to higher):
-
-- `COLUMN` — Privilege can be granted for column, table, database, or globally.
-- `TABLE` — Privilege can be granted for table, database, or globally.
-- `VIEW` — Privilege can be granted for view, database, or globally.
-- `DICTIONARY` — Privilege can be granted for dictionary, database, or globally.
-- `DATABASE` — Privilege can be granted for database or globally.
-- `GLOBAL` — Privilege can be granted only globally.
-- `GROUP` — Groups privileges of different levels. When a `GROUP`-level privilege is granted, only those privileges from the group are granted which correspond to the used syntax.
-
-Examples of allowed syntax:
-
-- `GRANT SELECT(x) ON db.table TO user`
-- `GRANT SELECT ON db.* TO user`
-
-Examples of disallowed syntax:
-
-- `GRANT CREATE USER(x) ON db.table TO user`
-- `GRANT CREATE USER ON db.* TO user`
-
-The special privilege [ALL](#grant-all) grants all the privileges to a user account or a role.
-
-By default, a user account or a role has no privileges.
-
-If a user or a role has no privileges, it is displayed as the [NONE](#grant-none) privilege.
-
-Some queries by their implementation require a set of privileges. For example, to execute the [RENAME](misc.md#misc_operations-rename) query you need the following privileges: `SELECT`, `CREATE TABLE`, `INSERT` and `DROP TABLE`.
-
-### SELECT {#grant-select}
-
-Allows executing [SELECT](select/index.md) queries.
-
-Privilege level: `COLUMN`.
-
-**Description**
-
-A user granted with this privilege can execute `SELECT` queries over a specified list of columns in the specified table and database. If the user includes other columns, the query returns no data.
-
-Consider the following privilege:
-
-``` sql
-GRANT SELECT(x,y) ON db.table TO john
-```
-
-This privilege allows `john` to execute any `SELECT` query that involves data from the `x` and/or `y` columns in `db.table`, for example, `SELECT x FROM db.table`. `john` can't execute `SELECT z FROM db.table`. The `SELECT * FROM db.table` is also not available. When processing this query, ClickHouse doesn't return any data, even `x` and `y`. The only exception is if a table contains only the `x` and `y` columns, in which case ClickHouse returns all the data.
-
-### INSERT {#grant-insert}
-
-Allows executing [INSERT](insert-into.md) queries.
-
-Privilege level: `COLUMN`.
-
-**Description**
-
-A user granted with this privilege can execute `INSERT` queries over a specified list of columns in the specified table and database. If the user includes other columns, the query doesn't insert any data.
-
-**Example**
-
-``` sql
-GRANT INSERT(x,y) ON db.table TO john
-```
-
-The granted privilege allows `john` to insert data into the `x` and/or `y` columns in `db.table`.
-
-### ALTER {#grant-alter}
-
-Allows executing [ALTER](alter.md) queries according to the following hierarchy of privileges:
-
-- `ALTER`. Level: `COLUMN`.
-    - `ALTER TABLE`. Level: `GROUP`
-        - `ALTER UPDATE`. Level: `COLUMN`. Aliases: `UPDATE`
-        - `ALTER DELETE`. Level: `COLUMN`. Aliases: `DELETE`
-        - `ALTER COLUMN`. Level: `GROUP`
-            - `ALTER ADD COLUMN`. Level: `COLUMN`. Aliases: `ADD COLUMN`
-            - `ALTER DROP COLUMN`. Level: `COLUMN`. Aliases: `DROP COLUMN`
-            - `ALTER MODIFY COLUMN`. Level: `COLUMN`. Aliases: `MODIFY COLUMN`
-            - `ALTER COMMENT COLUMN`. Level: `COLUMN`. Aliases: `COMMENT COLUMN`
-            - `ALTER CLEAR COLUMN`. Level: `COLUMN`. Aliases: `CLEAR COLUMN`
-            - `ALTER RENAME COLUMN`. Level: `COLUMN`. Aliases: `RENAME COLUMN`
-        - `ALTER INDEX`. Level: `GROUP`. Aliases: `INDEX`
-            - `ALTER ORDER BY`. Level: `TABLE`. Aliases: `ALTER MODIFY ORDER BY`, `MODIFY ORDER BY`
-            - `ALTER ADD INDEX`. Level: `TABLE`. Aliases: `ADD INDEX`
-            - `ALTER DROP INDEX`. Level: `TABLE`. Aliases: `DROP INDEX`
-            - `ALTER MATERIALIZE INDEX`. Level: `TABLE`. Aliases: `MATERIALIZE INDEX`
-            - `ALTER CLEAR INDEX`. Level: `TABLE`. Aliases: `CLEAR INDEX`
-        - `ALTER CONSTRAINT`. Level: `GROUP`. Aliases: `CONSTRAINT`
-            - `ALTER ADD CONSTRAINT`. Level: `TABLE`. Aliases: `ADD CONSTRAINT`
-            - `ALTER DROP CONSTRAINT`. Level: `TABLE`. Aliases: `DROP CONSTRAINT`
-        - `ALTER TTL`. Level: `TABLE`. Aliases: `ALTER MODIFY TTL`, `MODIFY TTL`
-            - `ALTER MATERIALIZE TTL`. Level: `TABLE`. Aliases: `MATERIALIZE TTL`
-        - `ALTER SETTINGS`. Level: `TABLE`. Aliases: `ALTER SETTING`, `ALTER MODIFY SETTING`, `MODIFY SETTING`
-        - `ALTER MOVE PARTITION`. Level: `TABLE`. Aliases: `ALTER MOVE PART`, `MOVE PARTITION`, `MOVE PART`
-        - `ALTER FETCH PARTITION`. Level: `TABLE`. Aliases: `FETCH PARTITION`
-        - `ALTER FREEZE PARTITION`. Level: `TABLE`. Aliases: `FREEZE PARTITION`
-    - `ALTER VIEW`. Level: `GROUP`
-        - `ALTER VIEW REFRESH`. Level: `VIEW`. Aliases: `ALTER LIVE VIEW REFRESH`, `REFRESH VIEW`
-        - `ALTER VIEW MODIFY QUERY`. Level: `VIEW`. Aliases: `ALTER TABLE MODIFY QUERY`
-
-Examples of how this hierarchy is treated:
-
-- The `ALTER` privilege includes all other `ALTER*` privileges.
-- `ALTER CONSTRAINT` includes the `ALTER ADD CONSTRAINT` and `ALTER DROP CONSTRAINT` privileges.
-
-**Notes**
-
-- The `MODIFY SETTING` privilege allows modifying table engine settings. It doesn't affect settings or server configuration parameters.
-- The `ATTACH` operation needs the [CREATE](#grant-create) privilege.
-- The `DETACH` operation needs the [DROP](#grant-drop) privilege.
-- To stop a mutation with the [KILL MUTATION](misc.md#kill-mutation) query, you need a privilege to start this mutation. For example, if you want to stop the `ALTER UPDATE` query, you need the `ALTER UPDATE`, `ALTER TABLE`, or `ALTER` privilege (see the sketch below).
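-
-A hedged sketch of the mutation-related privilege mentioned in the last note; the user, database, and table names are hypothetical:
-
-``` sql
-GRANT ALTER UPDATE ON db.table TO john  -- lets john start (and thus kill) ALTER UPDATE mutations on db.table
-```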
-
-### CREATE {#grant-create}
-
-Allows executing [CREATE](create.md) and [ATTACH](misc.md#attach) DDL queries according to the following hierarchy of privileges:
-
-- `CREATE`. Level: `GROUP`
-    - `CREATE DATABASE`. Level: `DATABASE`
-    - `CREATE TABLE`. Level: `TABLE`
-    - `CREATE VIEW`. Level: `VIEW`
-    - `CREATE DICTIONARY`. Level: `DICTIONARY`
-    - `CREATE TEMPORARY TABLE`. Level: `GLOBAL`
-
-**Notes**
-
-- To delete the created table, a user needs the [DROP](#grant-drop) privilege.
-
-### DROP {#grant-drop}
-
-Allows executing [DROP](misc.md#drop) and [DETACH](misc.md#detach) queries according to the following hierarchy of privileges:
-
-- `DROP`. Level: `GROUP`
-    - `DROP DATABASE`. Level: `DATABASE`
-    - `DROP TABLE`. Level: `TABLE`
-    - `DROP VIEW`. Level: `VIEW`
-    - `DROP DICTIONARY`. Level: `DICTIONARY`
-
-### TRUNCATE {#grant-truncate}
-
-Allows executing [TRUNCATE](misc.md#truncate-statement) queries.
-
-Privilege level: `TABLE`.
-
-### OPTIMIZE {#grant-optimize}
-
-Allows executing [OPTIMIZE TABLE](misc.md#misc_operations-optimize) queries.
-
-Privilege level: `TABLE`.
-
-### SHOW {#grant-show}
-
-Allows executing `SHOW`, `DESCRIBE`, `USE`, and `EXISTS` queries according to the following hierarchy of privileges:
-
-- `SHOW`. Level: `GROUP`
-    - `SHOW DATABASES`. Level: `DATABASE`. Allows executing `SHOW DATABASES`, `SHOW CREATE DATABASE`, `USE <database>` queries.
-    - `SHOW TABLES`. Level: `TABLE`. Allows executing `SHOW TABLES`, `EXISTS <table>`, `CHECK <table>` queries.
-    - `SHOW COLUMNS`. Level: `COLUMN`. Allows executing `SHOW CREATE TABLE`, `DESCRIBE` queries.
-    - `SHOW DICTIONARIES`. Level: `DICTIONARY`. Allows executing `SHOW DICTIONARIES`, `SHOW CREATE DICTIONARY`, `EXISTS <dictionary>` queries.
-
-**Notes**
-
-A user has the `SHOW` privilege if it has any other privilege concerning the specified table, dictionary or database.
-
-### KILL QUERY {#grant-kill-query}
-
-Allows executing [KILL](misc.md#kill-query-statement) queries.
-
-Privilege level: `GLOBAL`.
-
-**Notes**
-
-The `KILL QUERY` privilege allows one user to kill queries of other users.
-
-### ACCESS MANAGEMENT {#grant-access-management}
-
-Allows a user to execute queries that manage users, roles and row policies.
-
-- `ACCESS MANAGEMENT`. Level: `GROUP`
-    - `CREATE USER`. Level: `GLOBAL`
-    - `ALTER USER`. Level: `GLOBAL`
-    - `DROP USER`. Level: `GLOBAL`
-    - `CREATE ROLE`. Level: `GLOBAL`
-    - `ALTER ROLE`. Level: `GLOBAL`
-    - `DROP ROLE`. Level: `GLOBAL`
-    - `ROLE ADMIN`. Level: `GLOBAL`
-    - `CREATE ROW POLICY`. Level: `GLOBAL`. Aliases: `CREATE POLICY`
-    - `ALTER ROW POLICY`. Level: `GLOBAL`. Aliases: `ALTER POLICY`
-    - `DROP ROW POLICY`. Level: `GLOBAL`. Aliases: `DROP POLICY`
-    - `CREATE QUOTA`. Level: `GLOBAL`
-    - `ALTER QUOTA`. Level: `GLOBAL`
-    - `DROP QUOTA`. Level: `GLOBAL`
-    - `CREATE SETTINGS PROFILE`. Level: `GLOBAL`. Aliases: `CREATE PROFILE`
-    - `ALTER SETTINGS PROFILE`. Level: `GLOBAL`. Aliases: `ALTER PROFILE`
-    - `DROP SETTINGS PROFILE`. Level: `GLOBAL`. Aliases: `DROP PROFILE`
-    - `SHOW ACCESS`. Level: `GROUP`
-        - `SHOW_USERS`. Level: `GLOBAL`. Aliases: `SHOW CREATE USER`
-        - `SHOW_ROLES`. Level: `GLOBAL`. Aliases: `SHOW CREATE ROLE`
-        - `SHOW_ROW_POLICIES`. Level: `GLOBAL`. Aliases: `SHOW POLICIES`, `SHOW CREATE ROW POLICY`, `SHOW CREATE POLICY`
-        - `SHOW_QUOTAS`. Level: `GLOBAL`. Aliases: `SHOW CREATE QUOTA`
-        - `SHOW_SETTINGS_PROFILES`. Level: `GLOBAL`. Aliases: `SHOW PROFILES`, `SHOW CREATE SETTINGS PROFILE`, `SHOW CREATE PROFILE`
-
-The `ROLE ADMIN` privilege allows a user to assign and revoke any roles, including those which are not assigned to the user with the admin option.
-
-### SYSTEM {#grant-system}
-
-Allows a user to execute [SYSTEM](system.md) queries according to the following hierarchy of privileges.
-
-- `SYSTEM`. Level: `GROUP`
-    - `SYSTEM SHUTDOWN`. Level: `GLOBAL`. Aliases: `SYSTEM KILL`, `SHUTDOWN`
-    - `SYSTEM DROP CACHE`. Aliases: `DROP CACHE`
-        - `SYSTEM DROP DNS CACHE`. Level: `GLOBAL`. Aliases: `SYSTEM DROP DNS`, `DROP DNS CACHE`, `DROP DNS`
-        - `SYSTEM DROP MARK CACHE`. Level: `GLOBAL`. Aliases: `SYSTEM DROP MARK`, `DROP MARK CACHE`, `DROP MARKS`
-        - `SYSTEM DROP UNCOMPRESSED CACHE`. Level: `GLOBAL`. Aliases: `SYSTEM DROP UNCOMPRESSED`, `DROP UNCOMPRESSED CACHE`, `DROP UNCOMPRESSED`
-    - `SYSTEM RELOAD`. Level: `GROUP`
-        - `SYSTEM RELOAD CONFIG`. Level: `GLOBAL`. Aliases: `RELOAD CONFIG`
-        - `SYSTEM RELOAD DICTIONARY`. Level: `GLOBAL`. Aliases: `SYSTEM RELOAD DICTIONARIES`, `RELOAD DICTIONARY`, `RELOAD DICTIONARIES`
-        - `SYSTEM RELOAD EMBEDDED DICTIONARIES`. Level: `GLOBAL`. Aliases: `RELOAD EMBEDDED DICTIONARIES`
-    - `SYSTEM MERGES`. Level: `TABLE`. Aliases: `SYSTEM STOP MERGES`, `SYSTEM START MERGES`, `STOP MERGES`, `START MERGES`
-    - `SYSTEM TTL MERGES`. Level: `TABLE`. Aliases: `SYSTEM STOP TTL MERGES`, `SYSTEM START TTL MERGES`, `STOP TTL MERGES`, `START TTL MERGES`
-    - `SYSTEM FETCHES`. Level: `TABLE`. Aliases: `SYSTEM STOP FETCHES`, `SYSTEM START FETCHES`, `STOP FETCHES`, `START FETCHES`
-    - `SYSTEM MOVES`. Level: `TABLE`. Aliases: `SYSTEM STOP MOVES`, `SYSTEM START MOVES`, `STOP MOVES`, `START MOVES`
-    - `SYSTEM SENDS`. Level: `GROUP`. Aliases: `SYSTEM STOP SENDS`, `SYSTEM START SENDS`, `STOP SENDS`, `START SENDS`
-        - `SYSTEM DISTRIBUTED SENDS`. Level: `TABLE`. Aliases: `SYSTEM STOP DISTRIBUTED SENDS`, `SYSTEM START DISTRIBUTED SENDS`, `STOP DISTRIBUTED SENDS`, `START DISTRIBUTED SENDS`
-        - `SYSTEM REPLICATED SENDS`. Level: `TABLE`. Aliases: `SYSTEM STOP REPLICATED SENDS`, `SYSTEM START REPLICATED SENDS`, `STOP REPLICATED SENDS`, `START REPLICATED SENDS`
-    - `SYSTEM REPLICATION QUEUES`. Level: `TABLE`. Aliases: `SYSTEM STOP REPLICATION QUEUES`, `SYSTEM START REPLICATION QUEUES`, `STOP REPLICATION QUEUES`, `START REPLICATION QUEUES`
-    - `SYSTEM SYNC REPLICA`. Level: `TABLE`. Aliases: `SYNC REPLICA`
-    - `SYSTEM RESTART REPLICA`. Level: `TABLE`. Aliases: `RESTART REPLICA`
-    - `SYSTEM FLUSH`. Level: `GROUP`
-        - `SYSTEM FLUSH DISTRIBUTED`. Level: `TABLE`. Aliases: `FLUSH DISTRIBUTED`
-        - `SYSTEM FLUSH LOGS`. Level: `GLOBAL`. Aliases: `FLUSH LOGS`
-
-The `SYSTEM RELOAD EMBEDDED DICTIONARIES` privilege is implicitly granted by the `SYSTEM RELOAD DICTIONARY ON *.*` privilege.
-
-### INTROSPECTION {#grant-introspection}
-
-Allows using [introspection](../../operations/optimizing-performance/sampling-query-profiler.md) functions.
-
-- `INTROSPECTION`. Level: `GROUP`. Aliases: `INTROSPECTION FUNCTIONS`
-    - `addressToLine`. Level: `GLOBAL`
-    - `addressToSymbol`. Level: `GLOBAL`
-    - `demangle`. Level: `GLOBAL`
-
-### SOURCES {#grant-sources}
-
-Allows using external data sources. Applies to [table engines](../../engines/table-engines/index.md) and [table functions](../table-functions/index.md#table-functions).
-
-- `SOURCES`. Level: `GROUP`
-    - `FILE`. Level: `GLOBAL`
-    - `URL`. Level: `GLOBAL`
-    - `REMOTE`. Level: `GLOBAL`
-    - `MYSQL`. Level: `GLOBAL`
-    - `ODBC`. Level: `GLOBAL`
-    - `JDBC`. Level: `GLOBAL`
-    - `HDFS`. Level: `GLOBAL`
-    - `S3`. Level: `GLOBAL`
-
-The `SOURCES` privilege enables the use of all the sources. Also, you can grant a privilege for each source individually. To use sources, you need additional privileges.
-
-Examples:
-
-- To create a table with the [MySQL table engine](../../engines/table-engines/integrations/mysql.md), you need the `CREATE TABLE (ON db.table_name)` and `MYSQL` privileges.
-- To use the [mysql table function](../table-functions/mysql.md), you need the `CREATE TEMPORARY TABLE` and `MYSQL` privileges.
-
-### dictGet {#grant-dictget}
-
-- `dictGet`. Aliases: `dictHas`, `dictGetHierarchy`, `dictIsIn`
-
-Allows a user to execute the [dictGet](../functions/ext-dict-functions.md#dictget), [dictHas](../functions/ext-dict-functions.md#dicthas), [dictGetHierarchy](../functions/ext-dict-functions.md#dictgethierarchy), [dictIsIn](../functions/ext-dict-functions.md#dictisin) functions.
-
-Privilege level: `DICTIONARY`.
-
-**Examples**
-
-- `GRANT dictGet ON mydb.mydictionary TO john`
-- `GRANT dictGet ON mydictionary TO john`
-
-### ALL {#grant-all}
-
-Grants all the privileges on a regulated entity to a user account or a role.
-
-### NONE {#grant-none}
-
-Doesn't grant any privileges.
- -### ADMIN OPTION {#admin-option-privilege} - -The `ADMIN OPTION` privilege allows a user to grant their role to another user. - -[Original article](https://clickhouse.tech/docs/en/query_language/grant/) diff --git a/docs/fr/sql-reference/statements/index.md b/docs/fr/sql-reference/statements/index.md deleted file mode 100644 index f08d64cee39..00000000000 --- a/docs/fr/sql-reference/statements/index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_folder_title: "D\xE9claration" -toc_priority: 31 ---- - - diff --git a/docs/fr/sql-reference/statements/insert-into.md b/docs/fr/sql-reference/statements/insert-into.md deleted file mode 100644 index 987594bae65..00000000000 --- a/docs/fr/sql-reference/statements/insert-into.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 34 -toc_title: INSERT INTO ---- - -## INSERT {#insert} - -Adding data. - -Basic query format: - -``` sql -INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ... -``` - -The query can specify a list of columns to insert `[(c1, c2, c3)]`. In this case, the rest of the columns are filled with: - -- The values calculated from the `DEFAULT` expressions specified in the table definition. -- Zeros and empty strings, if `DEFAULT` expressions are not defined. - -If [strict_insert_defaults=1](../../operations/settings/settings.md), columns that do not have `DEFAULT` defined must be listed in the query. - -Data can be passed to the INSERT in any [format](../../interfaces/formats.md#formats) supported by ClickHouse. The format must be specified explicitly in the query: - -``` sql -INSERT INTO [db.]table [(c1, c2, c3)] FORMAT format_name data_set -``` - -For example, the following query format is identical to the basic version of INSERT … VALUES: - -``` sql -INSERT INTO [db.]table [(c1, c2, c3)] FORMAT Values (v11, v12, v13), (v21, v22, v23), ... -``` - -ClickHouse removes all spaces and one line feed (if there is one) before the data. When forming a query, we recommend putting the data on a new line after the query operators (this is important if the data begins with spaces). - -Example: - -``` sql -INSERT INTO t FORMAT TabSeparated -11 Hello, world! -22 Qwerty -``` - -You can insert data separately from the query by using the command-line client or the HTTP interface. For more information, see the section “[Interfaces](../../interfaces/index.md#interfaces)”. - -### Constraints {#constraints} - -If the table has [constraints](create.md#constraints), their expressions will be checked for each row of inserted data. If any of those constraints is not satisfied — the server will raise an exception containing the constraint name and expression, and the query will be stopped. - -### Inserting the results of `SELECT` {#insert_query_insert-select} - -``` sql -INSERT INTO [db.]table [(c1, c2, c3)] SELECT ... -``` - -Columns are mapped according to their position in the SELECT clause. However, their names in the SELECT expression and the table for INSERT may differ. If necessary, type casting is performed.
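As a sketch of the positional mapping and implicit casting described above (the table and column names are hypothetical):

``` sql
CREATE TABLE src (id UInt32, name String) ENGINE = TinyLog;
CREATE TABLE dst (user_id UInt64, user_name String) ENGINE = TinyLog;

-- Column names differ, but positions match; id (UInt32) is cast to user_id (UInt64).
INSERT INTO dst (user_id, user_name) SELECT id, name FROM src;
```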
- -None of the data formats except Values allow setting values to expressions such as `now()`, `1 + 2`, and so on. The Values format allows limited use of expressions, but this is not recommended, because in this case inefficient code is used for their execution. - -Other queries for modifying data parts are not supported: `UPDATE`, `DELETE`, `REPLACE`, `MERGE`, `UPSERT`, `INSERT UPDATE`. -However, you can delete old data using `ALTER TABLE ... DROP PARTITION`. - -The `FORMAT` clause must be specified at the end of the query if the `SELECT` clause contains the table function [input()](../table-functions/input.md). - -### Performance Considerations {#performance-considerations} - -`INSERT` sorts the input data by primary key and splits them into partitions by a partition key. If you insert data into several partitions at once, it can significantly reduce the performance of the `INSERT` query. To avoid this: - -- Add data in fairly large batches, such as 100,000 rows at a time. -- Group data by a partition key before uploading it to ClickHouse. - -Performance will not decrease if: - -- Data is added in real time. -- You upload data that is usually sorted by time. - -[Original article](https://clickhouse.tech/docs/en/query_language/insert_into/) diff --git a/docs/fr/sql-reference/statements/misc.md b/docs/fr/sql-reference/statements/misc.md deleted file mode 100644 index 4631f856266..00000000000 --- a/docs/fr/sql-reference/statements/misc.md +++ /dev/null @@ -1,358 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 41 -toc_title: Other ---- - -# Miscellaneous Queries {#miscellaneous-queries} - -## ATTACH {#attach} - -This query is exactly the same as `CREATE`, but - -- Instead of the word `CREATE` it uses the word `ATTACH`. -- The query doesn't create data on the disk, but assumes that data is already in the appropriate places, and just adds information about the table to the server. - After executing an ATTACH query, the server will know about the existence of the table. - -If the table was previously detached (`DETACH`), meaning that its structure is known, you can use shorthand without defining the structure. - -``` sql -ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster] -``` - -This query is used when starting the server. The server stores table metadata as files with `ATTACH` queries, which it simply runs at launch (with the exception of system tables, which are explicitly created on the server). - -## CHECK TABLE {#check-table} - -Checks if the data in the table is corrupted. - -``` sql -CHECK TABLE [db.]name -``` - -The `CHECK TABLE` query compares actual file sizes with the expected values which are stored on the server. If the file sizes do not match the stored values, it means the data is corrupted. This can be caused, for example, by a system crash during query execution. - -The query response contains the `result` column with a single row. The row has a value of -[Boolean](../../sql-reference/data-types/boolean.md) type: - -- 0 - The data in the table is corrupted.
-- 1 - The data maintains integrity. - -The `CHECK TABLE` query supports the following table engines: - -- [Log](../../engines/table-engines/log-family/log.md) -- [TinyLog](../../engines/table-engines/log-family/tinylog.md) -- [StripeLog](../../engines/table-engines/log-family/stripelog.md) -- [MergeTree family](../../engines/table-engines/mergetree-family/mergetree.md) - -Performed over tables with any other table engine, it causes an exception. - -Engines from the `*Log` family don't provide automatic data recovery on failure. Use the `CHECK TABLE` query to track data loss in a timely manner. - -For `MergeTree` family engines, the `CHECK TABLE` query shows a check status for every individual data part of a table on the local server. - -**If the data is corrupted** - -If the table is corrupted, you can copy the non-corrupted data to another table. To do this: - -1. Create a new table with the same structure as the damaged table. To do this, execute the query `CREATE TABLE <new_table_name> AS <damaged_table_name>`. -2. Set the [max_threads](../../operations/settings/settings.md#settings-max_threads) value to 1 to process the next query in a single thread. To do this, run the query `SET max_threads = 1`. -3. Execute the query `INSERT INTO <new_table_name> SELECT * FROM <damaged_table_name>`. This request copies the non-corrupted data from the damaged table to another table. Only the data before the corrupted part will be copied. -4. Restart the `clickhouse-client` to reset the `max_threads` value. - -## DESCRIBE TABLE {#misc-describe-table} - -``` sql -DESC|DESCRIBE TABLE [db.]table [INTO OUTFILE filename] [FORMAT format] -``` - -Returns the following `String` type columns: - -- `name` — Column name. -- `type` — Column type. -- `default_type` — Clause that is used in the [default expression](create.md#create-default-values) (`DEFAULT`, `MATERIALIZED` or `ALIAS`). The column contains an empty string if the default expression isn't specified. -- `default_expression` — Value specified in the `DEFAULT` clause. -- `comment_expression` — Comment text. - -Nested data structures are output in “expanded” format. Each column is shown separately, with the name after a dot. - -## DETACH {#detach} - -Deletes information about the ‘name’ table from the server. The server stops knowing about the table's existence. - -``` sql -DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] -``` - -This does not delete the table's data or metadata. On the next server launch, the server will read the metadata and find out about the table again. -Similarly, a “detached” table can be re-attached using the `ATTACH` query (with the exception of system tables, which do not have metadata stored for them). - -There is no `DETACH DATABASE` query. - -## DROP {#drop} - -This query has two types: `DROP DATABASE` and `DROP TABLE`. - -``` sql -DROP DATABASE [IF EXISTS] db [ON CLUSTER cluster] -``` - -Deletes all tables inside the ‘db’ database, then deletes the ‘db’ database itself. -If `IF EXISTS` is specified, it doesn't return an error if the database doesn't exist. - -``` sql -DROP [TEMPORARY] TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] -``` - -Deletes the table. -If `IF EXISTS` is specified, it doesn't return an error if the table doesn't exist or the database doesn't exist.
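A short sketch of both forms above (the database and table names are hypothetical):

``` sql
DROP DATABASE IF EXISTS old_db;
DROP TABLE IF EXISTS default.old_table;
```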
- - DROP DICTIONARY [IF EXISTS] [db.]name - -Deletes the dictionary. -If `IF EXISTS` is specified, it doesn't return an error if the dictionary doesn't exist or the database doesn't exist. - -## DROP USER {#drop-user-statement} - -Deletes a user. - -### Syntax {#drop-user-syntax} - -``` sql -DROP USER [IF EXISTS] name [,...] [ON CLUSTER cluster_name] -``` - -## DROP ROLE {#drop-role-statement} - -Deletes a role. - -The deleted role is revoked from all the entities where it was granted. - -### Syntax {#drop-role-syntax} - -``` sql -DROP ROLE [IF EXISTS] name [,...] [ON CLUSTER cluster_name] -``` - -## DROP ROW POLICY {#drop-row-policy-statement} - -Deletes a row policy. - -The deleted row policy is revoked from all the entities where it was assigned. - -### Syntax {#drop-row-policy-syntax} - -``` sql -DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] [ON CLUSTER cluster_name] -``` - -## DROP QUOTA {#drop-quota-statement} - -Deletes a quota. - -The deleted quota is revoked from all the entities where it was assigned. - -### Syntax {#drop-quota-syntax} - -``` sql -DROP QUOTA [IF EXISTS] name [,...] [ON CLUSTER cluster_name] -``` - -## DROP SETTINGS PROFILE {#drop-settings-profile-statement} - -Deletes a settings profile. - -The deleted settings profile is revoked from all the entities where it was assigned. - -### Syntax {#drop-settings-profile-syntax} - -``` sql -DROP [SETTINGS] PROFILE [IF EXISTS] name [,...] [ON CLUSTER cluster_name] -``` - -## EXISTS {#exists-statement} - -``` sql -EXISTS [TEMPORARY] [TABLE|DICTIONARY] [db.]name [INTO OUTFILE filename] [FORMAT format] -``` - -Returns a single `UInt8`-type column, which contains the single value `0` if the table or database doesn't exist, or `1` if the table exists in the specified database. - -## KILL QUERY {#kill-query-statement} - -``` sql -KILL QUERY [ON CLUSTER cluster] - WHERE <where expression to SELECT FROM system.processes query> - [SYNC|ASYNC|TEST] - [FORMAT format] -``` - -Attempts to forcibly terminate the currently running queries. -The queries to terminate are selected from the system.processes table using the criteria defined in the `WHERE` clause of the `KILL` query. - -Examples: - -``` sql --- Forcibly terminates all queries with the specified query_id: -KILL QUERY WHERE query_id='2-857d-4a57-9ee0-327da5d60a90' - --- Synchronously terminates all queries run by 'username': -KILL QUERY WHERE user='username' SYNC -``` - -Read-only users can only stop their own queries. - -By default, the asynchronous version of queries is used (`ASYNC`), which doesn't wait for confirmation that queries have stopped. - -The synchronous version (`SYNC`) waits for all queries to stop and displays information about each process as it stops. -The response contains the `kill_status` column, which can take the following values: - -1. ‘finished’ – The query was terminated successfully. -2. ‘waiting’ – Waiting for the query to end after sending it a signal to terminate. -3. The other values explain why the query can't be stopped. - -A test query (`TEST`) only checks the user's rights and displays a list of queries to stop. - -## KILL MUTATION {#kill-mutation} - -``` sql -KILL MUTATION [ON CLUSTER cluster] - WHERE <where expression to SELECT FROM system.mutations query> - [TEST] - [FORMAT format] -``` - -Tries to cancel and remove [mutations](alter.md#alter-mutations) that are currently executing.
Mutations to cancel are selected from the [`system.mutations`](../../operations/system-tables.md#system_tables-mutations) table using the filter specified by the `WHERE` clause of the `KILL` query. - -A test query (`TEST`) only checks the user's rights and displays a list of queries to stop. - -Examples: - -``` sql --- Cancel and remove all mutations of the single table: -KILL MUTATION WHERE database = 'default' AND table = 'table' - --- Cancel the specific mutation: -KILL MUTATION WHERE database = 'default' AND table = 'table' AND mutation_id = 'mutation_3.txt' -``` - -The query is useful when a mutation is stuck and cannot finish (e.g. if some function in the mutation query throws an exception when applied to the data contained in the table). - -Changes already made by the mutation are not rolled back. - -## OPTIMIZE {#misc_operations-optimize} - -``` sql -OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE] -``` - -This query tries to initialize an unscheduled merge of data parts for tables with a table engine from the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family. - -The `OPTIMIZE` query is also supported for the [MaterializedView](../../engines/table-engines/special/materializedview.md) and [Buffer](../../engines/table-engines/special/buffer.md) engines. Other table engines aren't supported. - -When `OPTIMIZE` is used with the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family of table engines, ClickHouse creates a task for merging and waits for execution on all nodes (if the `replication_alter_partitions_sync` setting is enabled). - -- If `OPTIMIZE` doesn't perform a merge for any reason, it doesn't notify the client. To enable notifications, use the [optimize_throw_if_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) setting. -- If you specify a `PARTITION`, only the specified partition is optimized. [How to set the partition expression](alter.md#alter-how-to-specify-part-expr). -- If you specify `FINAL`, optimization is performed even when all the data is already in one part. -- If you specify `DEDUPLICATE`, then completely identical rows will be deduplicated (all columns are compared); this makes sense only for the MergeTree engine. - -!!! warning "Warning" - `OPTIMIZE` can't fix the “Too many parts” error. - -## RENAME {#misc_operations-rename} - -Renames one or more tables. - -``` sql -RENAME TABLE [db11.]name11 TO [db12.]name12, [db21.]name21 TO [db22.]name22, ... [ON CLUSTER cluster] -``` - -All tables are renamed under a global lock. Renaming tables is a light operation. If you specified another database after TO, the table will be moved to this database. However, the directories with databases must reside in the same file system (otherwise, an error is returned). - -## SET {#query-set} - -``` sql -SET param = value -``` - -Assigns `value` to the `param` [setting](../../operations/settings/index.md) for the current session. You cannot change [server settings](../../operations/server-configuration-parameters/index.md) this way.
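For instance, a minimal sketch (the value is arbitrary):

``` sql
SET max_threads = 8; -- applies to the current session only
```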
- -You can also set all the values from the specified settings profile in a single query. - -``` sql -SET profile = 'profile-name-from-the-settings-file' -``` - -For more information, see [Settings](../../operations/settings/settings.md). - -## SET ROLE {#set-role-statement} - -Activates roles for the current user. - -### Syntax {#set-role-syntax} - -``` sql -SET ROLE {DEFAULT | NONE | role [,...] | ALL | ALL EXCEPT role [,...]} -``` - -## SET DEFAULT ROLE {#set-default-role-statement} - -Sets default roles to a user. - -Default roles are automatically activated at user login. You can set as default only the previously granted roles. If a role isn't granted to a user, ClickHouse throws an exception. - -### Syntax {#set-default-role-syntax} - -``` sql -SET DEFAULT ROLE {NONE | role [,...] | ALL | ALL EXCEPT role [,...]} TO {user|CURRENT_USER} [,...] -``` - -### Examples {#set-default-role-examples} - -Set multiple default roles to a user: - -``` sql -SET DEFAULT ROLE role1, role2, ... TO user -``` - -Set all the granted roles as default to a user: - -``` sql -SET DEFAULT ROLE ALL TO user -``` - -Purge default roles from a user: - -``` sql -SET DEFAULT ROLE NONE TO user -``` - -Set all the granted roles as default except for some of them: - -``` sql -SET DEFAULT ROLE ALL EXCEPT role1, role2 TO user -``` - -## TRUNCATE {#truncate-statement} - -``` sql -TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] -``` - -Removes all data from a table. When the clause `IF EXISTS` is omitted, the query returns an error if the table doesn't exist. - -The `TRUNCATE` query is not supported for [View](../../engines/table-engines/special/view.md), [File](../../engines/table-engines/special/file.md), [URL](../../engines/table-engines/special/url.md) and [Null](../../engines/table-engines/special/null.md) table engines. - -## USE {#use} - -``` sql -USE db -``` - -Lets you set the current database for the session. -The current database is used for searching for tables if the database isn't explicitly defined in the query with a dot before the table name. -This query can't be made when using the HTTP protocol, since there is no concept of a session. - -[Original article](https://clickhouse.tech/docs/en/query_language/misc/) diff --git a/docs/fr/sql-reference/statements/revoke.md b/docs/fr/sql-reference/statements/revoke.md deleted file mode 100644 index 6137cc30f8c..00000000000 --- a/docs/fr/sql-reference/statements/revoke.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 40 -toc_title: REVOKE ---- - -# REVOKE {#revoke} - -Revokes privileges from users or roles. - -## Syntax {#revoke-syntax} - -**Revoking privileges from users** - -``` sql -REVOKE [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.table|db.*|*.*|table|*} FROM {user | CURRENT_USER} [,...] | ALL | ALL EXCEPT {user | CURRENT_USER} [,...] -``` - -**Revoking roles from users** - -``` sql -REVOKE [ON CLUSTER cluster_name] [ADMIN OPTION FOR] role [,...] FROM {user | role | CURRENT_USER} [,...] | ALL | ALL EXCEPT {user_name | role_name | CURRENT_USER} [,...]
-``` - -## Description {#revoke-description} - -To revoke some privilege you can use a privilege of a broader scope than you plan to revoke. For example, if a user has the `SELECT (x,y)` privilege, an administrator can perform the `REVOKE SELECT(x,y) ...`, or `REVOKE SELECT * ...`, or even `REVOKE ALL PRIVILEGES ...` query to revoke this privilege. - -### Partial Revokes {#partial-revokes-dscr} - -You can revoke a part of a privilege. For example, if a user has the `SELECT *.*` privilege, you can revoke from it the privilege to read data from some table or database. - -## Examples {#revoke-example} - -Grant the `john` user account the privilege to select from all databases except the `accounts` one: - -``` sql -GRANT SELECT ON *.* TO john; -REVOKE SELECT ON accounts.* FROM john; -``` - -Grant the `mira` user account the privilege to select from all columns of the `accounts.staff` table except the `wage` one. - -``` sql -GRANT SELECT ON accounts.staff TO mira; -REVOKE SELECT(wage) ON accounts.staff FROM mira; -``` - -{## [Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) ##} diff --git a/docs/fr/sql-reference/statements/select/array-join.md b/docs/fr/sql-reference/statements/select/array-join.md deleted file mode 100644 index 07b27d5d16c..00000000000 --- a/docs/fr/sql-reference/statements/select/array-join.md +++ /dev/null @@ -1,282 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd ---- - -# ARRAY JOIN Clause {#select-array-join-clause} - -It is a common operation for tables that contain an array column to produce a new table that has a column with each individual array element of that initial column, while values of other columns are duplicated. This is the basic case of what the `ARRAY JOIN` clause does. - -Its name comes from the fact that it can be looked at as executing a `JOIN` with an array or nested data structure. The intent is similar to the [arrayJoin](../../functions/array-join.md#functions_arrayjoin) function, but the clause's functionality is broader. - -Syntax: - -``` sql -SELECT <expr_list> -FROM <left_subquery> -[LEFT] ARRAY JOIN <array> -[WHERE|PREWHERE <expr>] -... -``` - -You can specify only one `ARRAY JOIN` clause in a `SELECT` query. - -Supported types of `ARRAY JOIN` are listed below: - -- `ARRAY JOIN` - In the basic case, empty arrays are not included in the result of the `JOIN`. -- `LEFT ARRAY JOIN` - The result of the `JOIN` contains rows with empty arrays. The value for an empty array is set to the default value for the array element type (usually 0, empty string or NULL). - -## Basic ARRAY JOIN Examples {#basic-array-join-examples} - -The examples below demonstrate the usage of the `ARRAY JOIN` and `LEFT ARRAY JOIN` clauses.
Let's create a table with an [Array](../../../sql-reference/data-types/array.md) type column and insert values into it: - -``` sql -CREATE TABLE arrays_test -( - s String, - arr Array(UInt8) -) ENGINE = Memory; - -INSERT INTO arrays_test -VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []); -``` - -``` text -┌─s───────────┬─arr─────┐ -│ Hello │ [1,2] │ -│ World │ [3,4,5] │ -│ Goodbye │ [] │ -└─────────────┴─────────┘ -``` - -The example below uses the `ARRAY JOIN` clause: - -``` sql -SELECT s, arr -FROM arrays_test -ARRAY JOIN arr; -``` - -``` text -┌─s─────┬─arr─┐ -│ Hello │ 1 │ -│ Hello │ 2 │ -│ World │ 3 │ -│ World │ 4 │ -│ World │ 5 │ -└───────┴─────┘ -``` - -The next example uses the `LEFT ARRAY JOIN` clause: - -``` sql -SELECT s, arr -FROM arrays_test -LEFT ARRAY JOIN arr; -``` - -``` text -┌─s───────────┬─arr─┐ -│ Hello │ 1 │ -│ Hello │ 2 │ -│ World │ 3 │ -│ World │ 4 │ -│ World │ 5 │ -│ Goodbye │ 0 │ -└─────────────┴─────┘ -``` - -## Using Aliases {#using-aliases} - -An alias can be specified for an array in the `ARRAY JOIN` clause. In this case, an array item can be accessed by this alias, but the array itself is accessed by the original name. Example: - -``` sql -SELECT s, arr, a -FROM arrays_test -ARRAY JOIN arr AS a; -``` - -``` text -┌─s─────┬─arr─────┬─a─┐ -│ Hello │ [1,2] │ 1 │ -│ Hello │ [1,2] │ 2 │ -│ World │ [3,4,5] │ 3 │ -│ World │ [3,4,5] │ 4 │ -│ World │ [3,4,5] │ 5 │ -└───────┴─────────┴───┘ -``` - -Using aliases, you can perform `ARRAY JOIN` with an external array. Example: - -``` sql -SELECT s, arr_external -FROM arrays_test -ARRAY JOIN [1, 2, 3] AS arr_external; -``` - -``` text -┌─s───────────┬─arr_external─┐ -│ Hello │ 1 │ -│ Hello │ 2 │ -│ Hello │ 3 │ -│ World │ 1 │ -│ World │ 2 │ -│ World │ 3 │ -│ Goodbye │ 1 │ -│ Goodbye │ 2 │ -│ Goodbye │ 3 │ -└─────────────┴──────────────┘ -``` - -Multiple arrays can be comma-separated in the `ARRAY JOIN` clause. In this case, the `JOIN` is performed with them simultaneously (the direct sum, not the cartesian product). Note that all the arrays must have the same size.
Example: - -``` sql -SELECT s, arr, a, num, mapped -FROM arrays_test -ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS mapped; -``` - -``` text -┌─s─────┬─arr─────┬─a─┬─num─┬─mapped─┐ -│ Hello │ [1,2] │ 1 │ 1 │ 2 │ -│ Hello │ [1,2] │ 2 │ 2 │ 3 │ -│ World │ [3,4,5] │ 3 │ 1 │ 4 │ -│ World │ [3,4,5] │ 4 │ 2 │ 5 │ -│ World │ [3,4,5] │ 5 │ 3 │ 6 │ -└───────┴─────────┴───┴─────┴────────┘ -``` - -The example below uses the [arrayEnumerate](../../../sql-reference/functions/array-functions.md#array_functions-arrayenumerate) function: - -``` sql -SELECT s, arr, a, num, arrayEnumerate(arr) -FROM arrays_test -ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num; -``` - -``` text -┌─s─────┬─arr─────┬─a─┬─num─┬─arrayEnumerate(arr)─┐ -│ Hello │ [1,2] │ 1 │ 1 │ [1,2] │ -│ Hello │ [1,2] │ 2 │ 2 │ [1,2] │ -│ World │ [3,4,5] │ 3 │ 1 │ [1,2,3] │ -│ World │ [3,4,5] │ 4 │ 2 │ [1,2,3] │ -│ World │ [3,4,5] │ 5 │ 3 │ [1,2,3] │ -└───────┴─────────┴───┴─────┴─────────────────────┘ -``` - -## ARRAY JOIN with Nested Data Structure {#array-join-with-nested-data-structure} - -`ARRAY JOIN` also works with [nested data structures](../../../sql-reference/data-types/nested-data-structures/nested.md): - -``` sql -CREATE TABLE nested_test -( - s String, - nest Nested( - x UInt8, - y UInt32) -) ENGINE = Memory; - -INSERT INTO nested_test -VALUES ('Hello', [1,2], [10,20]), ('World', [3,4,5], [30,40,50]), ('Goodbye', [], []); -``` - -``` text -┌─s───────┬─nest.x──┬─nest.y─────┐ -│ Hello │ [1,2] │ [10,20] │ -│ World │ [3,4,5] │ [30,40,50] │ -│ Goodbye │ [] │ [] │ -└─────────┴─────────┴────────────┘ -``` - -``` sql -SELECT s, `nest.x`, `nest.y` -FROM nested_test -ARRAY JOIN nest; -``` - -``` text -┌─s─────┬─nest.x─┬─nest.y─┐ -│ Hello │ 1 │ 10 │ -│ Hello │ 2 │ 20 │ -│ World │ 3 │ 30 │ -│ World │ 4 │ 40 │ -│ World │ 5 │ 50 │ -└───────┴────────┴────────┘ -``` - -When specifying names of nested data structures in `ARRAY JOIN`, the meaning is the same as `ARRAY JOIN` with all the array elements that it consists of. Examples are listed below: - -``` sql -SELECT s, `nest.x`, `nest.y` -FROM nested_test -ARRAY JOIN `nest.x`, `nest.y`; -``` - -``` text -┌─s─────┬─nest.x─┬─nest.y─┐ -│ Hello │ 1 │ 10 │ -│ Hello │ 2 │ 20 │ -│ World │ 3 │ 30 │ -│ World │ 4 │ 40 │ -│ World │ 5 │ 50 │ -└───────┴────────┴────────┘ -``` - -This variation also makes sense: - -``` sql -SELECT s, `nest.x`, `nest.y` -FROM nested_test -ARRAY JOIN `nest.x`; -``` - -``` text -┌─s─────┬─nest.x─┬─nest.y─────┐ -│ Hello │ 1 │ [10,20] │ -│ Hello │ 2 │ [10,20] │ -│ World │ 3 │ [30,40,50] │ -│ World │ 4 │ [30,40,50] │ -│ World │ 5 │ [30,40,50] │ -└───────┴────────┴────────────┘ -``` - -An alias may be used for a nested data structure, in order to select either the `JOIN` result or the source array.
Example: - -``` sql -SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y` -FROM nested_test -ARRAY JOIN nest AS n; -``` - -``` text -┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┐ -│ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ -│ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ -│ World │ 3 │ 30 │ [3,4,5] │ [30,40,50] │ -│ World │ 4 │ 40 │ [3,4,5] │ [30,40,50] │ -│ World │ 5 │ 50 │ [3,4,5] │ [30,40,50] │ -└───────┴─────┴─────┴─────────┴────────────┘ -``` - -Example of using the [arrayEnumerate](../../../sql-reference/functions/array-functions.md#array_functions-arrayenumerate) function: - -``` sql -SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`, num -FROM nested_test -ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num; -``` - -``` text -┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┬─num─┐ -│ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ 1 │ -│ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ 2 │ -│ World │ 3 │ 30 │ [3,4,5] │ [30,40,50] │ 1 │ -│ World │ 4 │ 40 │ [3,4,5] │ [30,40,50] │ 2 │ -│ World │ 5 │ 50 │ [3,4,5] │ [30,40,50] │ 3 │ -└───────┴─────┴─────┴─────────┴────────────┴─────┘ -``` - -## Implementation Details {#implementation-details} - -The query execution order is optimized when running `ARRAY JOIN`. Although `ARRAY JOIN` must always be specified before the [WHERE](where.md)/[PREWHERE](prewhere.md) clause in a query, technically they can be performed in any order, unless the result of `ARRAY JOIN` is used for filtering. The processing order is controlled by the query optimizer. diff --git a/docs/fr/sql-reference/statements/select/distinct.md b/docs/fr/sql-reference/statements/select/distinct.md deleted file mode 100644 index 94552018c98..00000000000 --- a/docs/fr/sql-reference/statements/select/distinct.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd ---- - -# DISTINCT Clause {#select-distinct} - -If `SELECT DISTINCT` is specified, only unique rows will remain in the query result. Thus, only a single row will remain out of all the sets of fully matching rows in the result. - -## NULL Processing {#null-processing} - -`DISTINCT` works with [NULL](../../syntax.md#null-literal) as if `NULL` were a specific value, and `NULL==NULL`. In other words, in the `DISTINCT` results, different combinations with `NULL` occur only once. It differs from `NULL` processing in most other contexts. - -## Alternatives {#alternatives} - -It is possible to obtain the same result by applying [GROUP BY](group-by.md) across the same set of values as specified in the `SELECT` clause, without using any aggregate functions. But there are a few differences from the `GROUP BY` approach: - -- `DISTINCT` can be applied together with `GROUP BY`. -- When [ORDER BY](order-by.md) is omitted and [LIMIT](limit.md) is defined, the query stops running immediately after the required number of different rows has been read. -- Data blocks are output as they are processed, without waiting for the entire query to finish running. - -## Limitations {#limitations} - -`DISTINCT` is not supported if `SELECT` has at least one array column. - -## Examples {#examples} - -ClickHouse supports using the `DISTINCT` and `ORDER BY` clauses for different columns in one query. The `DISTINCT` clause is executed before the `ORDER BY` clause.
- -Example table: - -``` text -┌─a─┬─b─┐ -│ 2 │ 1 │ -│ 1 │ 2 │ -│ 3 │ 3 │ -│ 2 │ 4 │ -└───┴───┘ -``` - -When selecting data with the `SELECT DISTINCT a FROM t1 ORDER BY b ASC` query, we get the following result: - -``` text -┌─a─┐ -│ 2 │ -│ 1 │ -│ 3 │ -└───┘ -``` - -If we change the sorting direction `SELECT DISTINCT a FROM t1 ORDER BY b DESC`, we get the following result: - -``` text -┌─a─┐ -│ 3 │ -│ 1 │ -│ 2 │ -└───┘ -``` - -Row `2, 4` was cut before sorting. - -Take this implementation specificity into account when programming queries. diff --git a/docs/fr/sql-reference/statements/select/format.md b/docs/fr/sql-reference/statements/select/format.md deleted file mode 100644 index a88bb7831ba..00000000000 --- a/docs/fr/sql-reference/statements/select/format.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd ---- - -# FORMAT Clause {#format-clause} - -ClickHouse supports a wide range of [serialization formats](../../../interfaces/formats.md) that can be used on query results, among other things. There are multiple ways to choose a format for `SELECT` output; one of them is to specify `FORMAT format` at the end of the query to get the resulting data in any specific format. - -A specific format might be used for convenience, integration with other systems or performance gain. - -## Default Format {#default-format} - -If the `FORMAT` clause is omitted, the default format is used, which depends on both the settings and the interface used for accessing the ClickHouse server. For the [HTTP interface](../../../interfaces/http.md) and the [command-line client](../../../interfaces/cli.md) in batch mode, the default format is `TabSeparated`. For the command-line client in interactive mode, the default format is `PrettyCompact` (it produces compact human-readable tables). - -## Implementation Details {#implementation-details} - -When using the command-line client, data is always passed over the network in an internal efficient format (`Native`). The client independently interprets the `FORMAT` clause of the query and formats the data itself (thus relieving the network and the server from the extra load). diff --git a/docs/fr/sql-reference/statements/select/from.md b/docs/fr/sql-reference/statements/select/from.md deleted file mode 100644 index 964ffdd13fb..00000000000 --- a/docs/fr/sql-reference/statements/select/from.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd ---- - -# FROM Clause {#select-from} - -The `FROM` clause specifies the source to read data from: - -- [Table](../../../engines/table-engines/index.md) -- [Subquery](index.md) {## TODO: better link ##} -- [Table function](../../table-functions/index.md#table-functions) - -The [JOIN](join.md) and [ARRAY JOIN](array-join.md) clauses may also be used to extend the functionality of the `FROM` clause. - -A subquery is another `SELECT` query that may be specified in parentheses inside the `FROM` clause. - -The `FROM` clause can contain multiple data sources, separated by commas, which is equivalent to performing a [CROSS JOIN](join.md) on them.
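A sketch of the three kinds of sources (the `hits` table and `CounterID` column are hypothetical; `numbers` is a built-in table function):

``` sql
SELECT count() FROM hits;                                     -- a table
SELECT count() FROM (SELECT * FROM hits WHERE CounterID = 1); -- a subquery
SELECT * FROM numbers(3);                                     -- a table function
```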
- -## FINAL Modifier {#select-from-final} - -When `FINAL` is specified, ClickHouse fully merges the data before returning the result and thus performs all data transformations that happen during merges for the given table engine. - -It is applicable when selecting data from tables that use the [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md)-engine family (except `GraphiteMergeTree`). Also supported for: - -- [Replicated](../../../engines/table-engines/mergetree-family/replication.md) versions of `MergeTree` engines. -- [View](../../../engines/table-engines/special/view.md), [Buffer](../../../engines/table-engines/special/buffer.md), [Distributed](../../../engines/table-engines/special/distributed.md), and [MaterializedView](../../../engines/table-engines/special/materializedview.md) engines that operate over other engines, provided they were created over `MergeTree`-engine tables. - -### Drawbacks {#drawbacks} - -Queries that use `FINAL` are executed not as fast as similar queries that don't, because: - -- The query is executed in a single thread and data is merged during query execution. -- Queries with `FINAL` read primary key columns in addition to the columns specified in the query. - -**In most cases, avoid using `FINAL`.** The common approach is to use different queries that assume the background processes of the `MergeTree` engine haven't happened yet and deal with it by applying aggregation (for example, to discard duplicates). {## TODO: examples ##} - -## Implementation Details {#implementation-details} - -If the `FROM` clause is omitted, data will be read from the `system.one` table. -The `system.one` table contains exactly one row (this table fulfills the same purpose as the DUAL table found in other DBMSs). - -To execute a query, all the columns listed in the query are extracted from the appropriate table. Any columns not needed for the external query are thrown out of the subqueries. -If a query does not list any columns (for example, `SELECT count() FROM t`), some column is extracted from the table anyway (the smallest one is preferred), in order to calculate the number of rows. diff --git a/docs/fr/sql-reference/statements/select/group-by.md b/docs/fr/sql-reference/statements/select/group-by.md deleted file mode 100644 index 9d1b5c276d5..00000000000 --- a/docs/fr/sql-reference/statements/select/group-by.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd ---- - -# GROUP BY Clause {#select-group-by-clause} - -The `GROUP BY` clause switches the `SELECT` query into an aggregation mode, which works as follows: - -- The `GROUP BY` clause contains a list of expressions (or a single expression, which is considered to be a list of length one). This list acts as a “grouping key”, while each individual expression will be referred to as a “key expression”. -- All the expressions in the [SELECT](index.md), [HAVING](having.md), and [ORDER BY](order-by.md) clauses **must** be calculated based on key expressions **or** on [aggregate functions](../../../sql-reference/aggregate-functions/index.md) over non-key expressions (including plain columns).
In other words, each column selected from the table must be used either in a key expression or inside an aggregate function, but not both. -- The result of aggregating the `SELECT` query will contain as many rows as there were unique values of the “grouping key” in the source table. Usually this significantly reduces the row count, often by orders of magnitude, but not necessarily: the row count stays the same if all “grouping key” values were distinct. - -!!! note "Note" - There's an additional way to run aggregation over a table. If a query contains table columns only inside aggregate functions, the `GROUP BY` clause can be omitted, and aggregation by an empty set of keys is assumed. Such queries always return exactly one row. - -## NULL Processing {#null-processing} - -For grouping, ClickHouse interprets [NULL](../../syntax.md#null-literal) as a value, and `NULL==NULL`. It differs from `NULL` processing in most other contexts. - -Here's an example to show what this means. - -Assume you have this table: - -``` text -┌─x─┬────y─┐ -│ 1 │ 2 │ -│ 2 │ ᴺᵁᴸᴸ │ -│ 3 │ 2 │ -│ 3 │ 3 │ -│ 3 │ ᴺᵁᴸᴸ │ -└───┴──────┘ -``` - -The query `SELECT sum(x), y FROM t_null_big GROUP BY y` results in: - -``` text -┌─sum(x)─┬────y─┐ -│ 4 │ 2 │ -│ 3 │ 3 │ -│ 5 │ ᴺᵁᴸᴸ │ -└────────┴──────┘ -``` - -You can see that `GROUP BY` for `y = NULL` summed up `x`, as if `NULL` were this value. - -If you pass several keys to `GROUP BY`, the result will give you all the combinations of the selection, as if `NULL` were a specific value. - -## WITH TOTALS Modifier {#with-totals-modifier} - -If the `WITH TOTALS` modifier is specified, another row will be calculated. This row will have key columns containing default values (zeros or empty lines), and columns of aggregate functions with the values calculated across all the rows (the “total” values). - -This extra row is only produced in `JSON*`, `TabSeparated*`, and `Pretty*` formats, separately from the other rows: - -- In `JSON*` formats, this row is output as a separate ‘totals’ field. -- In `TabSeparated*` formats, the row comes after the main result, preceded by an empty row (after the other data). -- In `Pretty*` formats, the row is output as a separate table after the main result. -- In the other formats it is not available. - -`WITH TOTALS` can be run in different ways when HAVING is present. The behavior depends on the ‘totals_mode’ setting. - -### Configuring Totals Processing {#configuring-totals-processing} - -By default, `totals_mode = 'before_having'`. In this case, ‘totals’ is calculated across all rows, including the ones that don't pass through HAVING and `max_rows_to_group_by`. - -The other alternatives include only the rows that pass through HAVING in ‘totals’, and behave differently with the setting `max_rows_to_group_by` and `group_by_overflow_mode = 'any'`. - -`after_having_exclusive` – Don't include rows that didn't pass through `max_rows_to_group_by`. In other words, ‘totals’ will have less than or the same number of rows as it would if `max_rows_to_group_by` were omitted. - -`after_having_inclusive` – Include all the rows that didn't pass through ‘max_rows_to_group_by’ in ‘totals’.
In other words, ‘totals’ will have more than or the same number of rows as it would if `max_rows_to_group_by` were omitted. - -`after_having_auto` – Count the number of rows that passed through HAVING. If it is more than a certain amount (by default, 50%), include all the rows that didn't pass through ‘max_rows_to_group_by’ in ‘totals’. Otherwise, do not include them. - -`totals_auto_threshold` – By default, 0.5. The coefficient for `after_having_auto`. - -If `max_rows_to_group_by` and `group_by_overflow_mode = 'any'` are not used, all variations of `after_having` are the same, and you can use any of them (for example, `after_having_auto`). - -You can use WITH TOTALS in subqueries, including subqueries in the JOIN clause (in this case, the respective total values are combined). - -## Examples {#examples} - -Example: - -``` sql -SELECT - count(), - median(FetchTiming > 60 ? 60 : FetchTiming), - count() - sum(Refresh) -FROM hits -``` - -However, in contrast to standard SQL, if the table does not have any rows (either there aren't any at all, or there aren't any after using WHERE to filter), an empty result is returned, and not the result from one of the rows containing the initial values of aggregate functions. - -As opposed to MySQL (and conforming to standard SQL), you can't get some value of some column that is not in a key or aggregate function (except constant expressions). To work around this, you can use the ‘any’ aggregate function (get the first encountered value) or ‘min’/‘max’. - -Example: - -``` sql -SELECT - domainWithoutWWW(URL) AS domain, - count(), - any(Title) AS title -- getting the first occurred page header for each domain. -FROM hits -GROUP BY domain -``` - -For every different key value encountered, GROUP BY calculates a set of aggregate function values. - -GROUP BY is not supported for array columns. - -A constant can't be specified as arguments for aggregate functions. Example: sum(1). Instead of this, you can get rid of the constant. Example: `count()`. - -## Implementation Details {#implementation-details} - -Aggregation is one of the most important features of a column-oriented DBMS, and thus its implementation is one of the most heavily optimized parts of ClickHouse. By default, aggregation is done in memory using a hash table. It has 40+ specializations that are chosen automatically depending on the “grouping key” data types. - -### GROUP BY in External Memory {#select-group-by-in-external-memory} - -You can enable dumping temporary data to the disk to restrict memory usage during `GROUP BY`. -The [max_bytes_before_external_group_by](../../../operations/settings/settings.md#settings-max_bytes_before_external_group_by) setting determines the threshold RAM consumption for dumping `GROUP BY` temporary data to the file system. If set to 0 (the default), it is disabled. - -When using `max_bytes_before_external_group_by`, we recommend that you set `max_memory_usage` about twice as high. This is necessary because there are two stages to aggregation: reading the data and forming intermediate data (1) and merging the intermediate data (2). Dumping data to the file system can only occur during stage 1.
If the temporary data wasn't dumped, then stage 2 might require up to the same amount of memory as in stage 1. - -For example, if [max_memory_usage](../../../operations/settings/settings.md#settings_max_memory_usage) was set to 10000000000 and you want to use external aggregation, it makes sense to set `max_bytes_before_external_group_by` to 10000000000, and `max_memory_usage` to 20000000000. When external aggregation is triggered (if there was at least one dump of temporary data), the maximum consumption of RAM is only slightly more than `max_bytes_before_external_group_by`. - -With distributed query processing, external aggregation is performed on remote servers. In order for the requester server to use only a small amount of RAM, set `distributed_aggregation_memory_efficient` to 1. - -When merging data flushed to the disk, as well as when merging results from remote servers when the `distributed_aggregation_memory_efficient` setting is enabled, it consumes up to `1/256 * the_number_of_threads` of the total amount of RAM. - -When external aggregation is enabled, if there was less than `max_bytes_before_external_group_by` of data (i.e. data was not flushed), the query runs just as fast as without external aggregation. If any temporary data was flushed, the run time will be several times longer (approximately three times). - -If you have an [ORDER BY](order-by.md) with a [LIMIT](limit.md) after `GROUP BY`, then the amount of RAM used depends on the amount of data in `LIMIT`, not in the whole table. But if the `ORDER BY` doesn't have `LIMIT`, don't forget to enable external sorting (`max_bytes_before_external_sort`). diff --git a/docs/fr/sql-reference/statements/select/having.md b/docs/fr/sql-reference/statements/select/having.md deleted file mode 100644 index 9425830c3d4..00000000000 --- a/docs/fr/sql-reference/statements/select/having.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd ---- - -# HAVING Clause {#having-clause} - -Allows filtering the aggregation results produced by [GROUP BY](group-by.md). It is similar to the [WHERE](where.md) clause, but the difference is that `WHERE` is performed before aggregation, while `HAVING` is performed after it. - -It is possible to reference aggregation results from the `SELECT` clause in the `HAVING` clause by their alias. Alternatively, the `HAVING` clause can filter on results of additional aggregates that are not returned in the query results. - -## Limitations {#limitations} - -`HAVING` can't be used if aggregation is not performed. Use `WHERE` instead. diff --git a/docs/fr/sql-reference/statements/select/index.md b/docs/fr/sql-reference/statements/select/index.md deleted file mode 100644 index 1d53ae80eb4..00000000000 --- a/docs/fr/sql-reference/statements/select/index.md +++ /dev/null @@ -1,158 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 33 -toc_title: SELECT ---- - -# SELECT Query Syntax {#select-queries-syntax} - -`SELECT` performs data retrieval. - -``` sql -[WITH expr_list|(subquery)] -SELECT [DISTINCT] expr_list -[FROM [db.]table | (subquery) | table_function] [FINAL] -[SAMPLE sample_coeff] -[ARRAY JOIN ...]
-[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN (subquery)|table (ON <expr_list>)|(USING <column_list>) -[PREWHERE expr] -[WHERE expr] -[GROUP BY expr_list] [WITH TOTALS] -[HAVING expr] -[ORDER BY expr_list] [WITH FILL] [FROM expr] [TO expr] [STEP expr] -[LIMIT [offset_value, ]n BY columns] -[LIMIT [n, ]m] [WITH TIES] -[UNION ALL ...] -[INTO OUTFILE filename] -[FORMAT format] -``` - -All clauses are optional, except for the required list of expressions immediately after `SELECT`, which is covered in more detail [below](#select-clause). - -Specifics of each optional clause are covered in separate sections, which are listed in the same order as they are executed: - -- [WITH clause](with.md) -- [DISTINCT clause](distinct.md) -- [FROM clause](from.md) -- [SAMPLE clause](sample.md) -- [JOIN clause](join.md) -- [PREWHERE clause](prewhere.md) -- [WHERE clause](where.md) -- [GROUP BY clause](group-by.md) -- [LIMIT BY clause](limit-by.md) -- [HAVING clause](having.md) -- [SELECT clause](#select-clause) -- [LIMIT clause](limit.md) -- [UNION ALL clause](union.md) - -## SELECT Clause {#select-clause} - -[Expressions](../../syntax.md#syntax-expressions) specified in the `SELECT` clause are calculated after all the operations in the clauses described above are finished. These expressions work as if they apply to separate rows in the result. If expressions in the `SELECT` clause contain aggregate functions, then ClickHouse processes aggregate functions and expressions used as their arguments during the [GROUP BY](group-by.md) aggregation. - -If you want to include all columns in the result, use the asterisk (`*`) symbol. For example, `SELECT * FROM ...`. - -To match some columns in the result with a [re2](https://en.wikipedia.org/wiki/RE2_(software)) regular expression, you can use the `COLUMNS` expression. - -``` sql -COLUMNS('regexp') -``` - -For example, consider the table: - -``` sql -CREATE TABLE default.col_names (aa Int8, ab Int8, bc Int8) ENGINE = TinyLog -``` - -The following query selects data from all the columns containing the `a` symbol in their name. - -``` sql -SELECT COLUMNS('a') FROM col_names -``` - -``` text -┌─aa─┬─ab─┐ -│ 1 │ 1 │ -└────┴────┘ -``` - -The selected columns are returned not in alphabetical order. - -You can use multiple `COLUMNS` expressions in a query and apply functions to them. - -For example: - -``` sql -SELECT COLUMNS('a'), COLUMNS('c'), toTypeName(COLUMNS('c')) FROM col_names -``` - -``` text -┌─aa─┬─ab─┬─bc─┬─toTypeName(bc)─┐ -│ 1 │ 1 │ 1 │ Int8 │ -└────┴────┴────┴────────────────┘ -``` - -Each column returned by the `COLUMNS` expression is passed to the function as a separate argument. You can also pass other arguments to the function if it supports them. Be careful when using functions. If a function doesn't support the number of arguments you have passed to it, ClickHouse throws an exception. - -For example: - -``` sql -SELECT COLUMNS('a') + COLUMNS('c') FROM col_names -``` - -``` text -Received exception from server (version 19.14.1): -Code: 42. DB::Exception: Received from localhost:9000. DB::Exception: Number of arguments for function plus doesn't match: passed 3, should be 2. -``` - -In this example, `COLUMNS('a')` returns two columns: `aa` and `ab`. `COLUMNS('c')` returns the `bc` column.
The `+` operator can't apply to 3 arguments, so ClickHouse throws an exception with the relevant message. - -Columns that matched the `COLUMNS` expression can have different data types. If `COLUMNS` doesn't match any columns and is the only expression in `SELECT`, ClickHouse throws an exception. - -### Asterisk {#asterisk} - -You can put an asterisk in any part of a query instead of an expression. When the query is analyzed, the asterisk is expanded to a list of all table columns (excluding the `MATERIALIZED` and `ALIAS` columns). There are only a few cases when using an asterisk is justified: - -- When creating a table dump. -- For tables containing just a few columns, such as system tables. -- For getting information about what columns are in a table. In this case, set `LIMIT 1`. But it is better to use the `DESC TABLE` query. -- When there is strong filtration on a small number of columns using `PREWHERE`. -- In subqueries (since columns that aren't needed for the external query are excluded from subqueries). - -In all other cases, we don't recommend using the asterisk, since it only gives you the drawbacks of a columnar DBMS instead of the advantages. In other words, using the asterisk is not recommended. - -### Extreme Values {#extreme-values} - -In addition to results, you can also get minimum and maximum values for the result columns. To do this, set the **extremes** setting to 1. Minimums and maximums are calculated for numeric types, dates, and dates with times. For other columns, the default values are output. - -An extra two rows are calculated – the minimums and maximums, respectively. These extra two rows are output in `JSON*`, `TabSeparated*`, and `Pretty*` [formats](../../../interfaces/formats.md), separate from the other rows. They are not output for other formats. - -In `JSON*` formats, the extreme values are output in a separate ‘extremes’ field. In `TabSeparated*` formats, the row comes after the main result, and after ‘totals’ if present. It is preceded by an empty row (after the other data). In `Pretty*` formats, the row is output as a separate table after the main result, and after `totals` if present. - -Extreme values are calculated for rows before `LIMIT`, but after `LIMIT BY`. However, when using `LIMIT offset, size`, the rows before `offset` are included in `extremes`. In stream requests, the result may also include a small number of rows that passed through `LIMIT`. - -### Notes {#notes} - -You can use synonyms (`AS` aliases) in any part of a query. - -The `GROUP BY` and `ORDER BY` clauses do not support positional arguments. This contradicts MySQL, but conforms to standard SQL. For example, `GROUP BY 1, 2` will be interpreted as grouping by constants (i.e. aggregation of all rows into one). - -## Implementation Details {#implementation-details} - -If the query omits the `DISTINCT`, `GROUP BY` and `ORDER BY` clauses and the `IN` and `JOIN` subqueries, the query will be completely stream processed, using O(1) amount of RAM.
Sinon, la requête peut consommer beaucoup de RAM si les restrictions appropriées ne sont pas spécifiées: - -- `max_memory_usage` -- `max_rows_to_group_by` -- `max_rows_to_sort` -- `max_rows_in_distinct` -- `max_bytes_in_distinct` -- `max_rows_in_set` -- `max_bytes_in_set` -- `max_rows_in_join` -- `max_bytes_in_join` -- `max_bytes_before_external_sort` -- `max_bytes_before_external_group_by` - -Pour plus d'informations, consultez la section “Settings”. Il est possible d'utiliser le tri externe (sauvegarde des tables temporaires sur un disque) et l'agrégation externe. - -{## [Article Original](https://clickhouse.tech/docs/en/sql-reference/statements/select/) ##} diff --git a/docs/fr/sql-reference/statements/select/into-outfile.md b/docs/fr/sql-reference/statements/select/into-outfile.md deleted file mode 100644 index 0150de7cb97..00000000000 --- a/docs/fr/sql-reference/statements/select/into-outfile.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd ---- - -# Dans OUTFILE Clause {#into-outfile-clause} - -Ajouter l' `INTO OUTFILE filename` clause (où filename est un littéral de chaîne) pour `SELECT query` pour rediriger sa sortie vers le fichier spécifié côté client. - -## Détails De Mise En Œuvre {#implementation-details} - -- Cette fonctionnalité est disponible dans les [client de ligne de commande](../../../interfaces/cli.md) et [clickhouse-local](../../../operations/utilities/clickhouse-local.md). Ainsi, une requête envoyée par [Interface HTTP](../../../interfaces/http.md) va échouer. -- La requête échouera si un fichier portant le même nom existe déjà. -- Défaut [le format de sortie](../../../interfaces/formats.md) être `TabSeparated` (comme dans le mode batch client en ligne de commande). diff --git a/docs/fr/sql-reference/statements/select/join.md b/docs/fr/sql-reference/statements/select/join.md deleted file mode 100644 index 4233a120674..00000000000 --- a/docs/fr/sql-reference/statements/select/join.md +++ /dev/null @@ -1,187 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd ---- - -# Clause de JOINTURE {#select-join} - -Join produit une nouvelle table en combinant des colonnes d'une ou plusieurs tables en utilisant des valeurs communes à chacune. C'est une opération courante dans les bases de données avec support SQL, ce qui correspond à [l'algèbre relationnelle](https://en.wikipedia.org/wiki/Relational_algebra#Joins_and_join-like_operators) rejoindre. Le cas particulier d'une jointure de table est souvent appelé “self-join”. - -Syntaxe: - -``` sql -SELECT -FROM -[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN -(ON )|(USING ) ... -``` - -Les Expressions de `ON` clause et colonnes de `USING` clause sont appelés “join keys”. Sauf indication contraire, joindre un produit [Produit cartésien](https://en.wikipedia.org/wiki/Cartesian_product) des lignes, avec correspondance “join keys”, ce qui pourrait produire des résultats avec beaucoup plus de lignes que les tables source. - -## Types de jointure pris en charge {#select-join-types} - -Tous les standard [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) les types sont pris en charge: - -- `INNER JOIN`, seules les lignes correspondantes sont retournés. -- `LEFT OUTER JOIN`, les lignes non correspondantes de la table de gauche sont retournées en plus des lignes correspondantes. 
-- `RIGHT OUTER JOIN`, les lignes non correspondantes de la table de gauche sont retournées en plus des lignes correspondantes. -- `FULL OUTER JOIN`, les lignes non correspondantes des deux tables sont renvoyées en plus des lignes correspondantes. -- `CROSS JOIN`, produit le produit cartésien des tables entières, “join keys” être **pas** défini. - -`JOIN` sans type spécifié implique `INNER`. Mot `OUTER` peut les oublier. Syntaxe Alternative pour `CROSS JOIN` spécifie plusieurs tables dans [De la clause](from.md) séparés par des virgules. - -Autres types de jointure disponibles dans ClickHouse: - -- `LEFT SEMI JOIN` et `RIGHT SEMI JOIN` une liste blanche sur “join keys”, sans produire un produit cartésien. -- `LEFT ANTI JOIN` et `RIGHT ANTI JOIN` une liste noire sur “join keys”, sans produire un produit cartésien. -- `LEFT ANY JOIN`, `RIGHT ANY JOIN` et `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types. -- `ASOF JOIN` et `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below. - -## Setting {#join-settings} - -!!! note "Note" - La valeur de rigueur par défaut peut être remplacée à l'aide [join_default_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) paramètre. - -### ASOF joindre L'utilisation {#asof-join-usage} - -`ASOF JOIN` est utile lorsque vous devez joindre des enregistrements qui n'ont pas de correspondance exacte. - -Tables pour `ASOF JOIN` doit avoir une colonne de séquence ordonnée. Cette colonne ne peut pas être seule dans une table et doit être l'un des types de données: `UInt32`, `UInt64`, `Float32`, `Float64`, `Date`, et `DateTime`. - -Syntaxe `ASOF JOIN ... ON`: - -``` sql -SELECT expressions_list -FROM table_1 -ASOF LEFT JOIN table_2 -ON equi_cond AND closest_match_cond -``` - -Vous pouvez utiliser n'importe quel nombre de conditions d'égalité et exactement une condition de correspondance la plus proche. Exemple, `SELECT count() FROM table_1 ASOF LEFT JOIN table_2 ON table_1.a == table_2.b AND table_2.t <= table_1.t`. - -Conditions prises en charge pour la correspondance la plus proche: `>`, `>=`, `<`, `<=`. - -Syntaxe `ASOF JOIN ... USING`: - -``` sql -SELECT expressions_list -FROM table_1 -ASOF JOIN table_2 -USING (equi_column1, ... equi_columnN, asof_column) -``` - -`ASOF JOIN` utiliser `equi_columnX` pour rejoindre sur l'égalité et `asof_column` pour rejoindre le match le plus proche avec le `table_1.asof_column >= table_2.asof_column` condition. Le `asof_column` colonne toujours la dernière dans le `USING` clause. - -Par exemple, considérez les tableaux suivants: - - table_1 table_2 - event | ev_time | user_id event | ev_time | user_id - ----------|---------|---------- ----------|---------|---------- - ... ... - event_1_1 | 12:00 | 42 event_2_1 | 11:59 | 42 - ... event_2_2 | 12:30 | 42 - event_1_2 | 13:00 | 42 event_2_3 | 13:00 | 42 - ... ... - -`ASOF JOIN` peut prendre la date d'un événement utilisateur de `table_1` et trouver un événement dans `table_2` où le timestamp est plus proche de l'horodatage de l'événement à partir de `table_1` correspondant à la condition de correspondance la plus proche. Les valeurs d'horodatage égales sont les plus proches si elles sont disponibles. Ici, l' `user_id` la colonne peut être utilisée pour joindre sur l'égalité et le `ev_time` la colonne peut être utilisée pour se joindre à la correspondance la plus proche. 
Dans notre exemple, `event_1_1` peut être jointe à `event_2_1` et `event_1_2` peut être jointe à `event_2_3`, mais `event_2_2` ne peut pas être rejoint. - -!!! note "Note" - `ASOF` jointure est **pas** pris en charge dans le [Rejoindre](../../../engines/table-engines/special/join.md) tableau moteur. - -## Jointure Distribuée {#global-join} - -Il existe deux façons d'exécuter join impliquant des tables distribuées: - -- Lors de l'utilisation normale `JOIN` la requête est envoyée aux serveurs distants. Les sous-requêtes sont exécutées sur chacune d'elles afin de créer la bonne table, et la jointure est effectuée avec cette table. En d'autres termes, la table de droite est formée sur chaque serveur séparément. -- Lors de l'utilisation de `GLOBAL ... JOIN`, d'abord le serveur demandeur exécute une sous-requête pour calculer la bonne table. Cette table temporaire est transmise à chaque serveur distant, et les requêtes sont exécutées sur eux en utilisant les données temporaires qui ont été transmises. - -Soyez prudent lorsque vous utilisez `GLOBAL`. Pour plus d'informations, voir le [Sous-requêtes distribuées](../../operators/in.md#select-distributed-subqueries) section. - -## Recommandations D'Utilisation {#usage-recommendations} - -### Traitement des cellules vides ou nulles {#processing-of-empty-or-null-cells} - -Lors de la jonction de tables, les cellules vides peuvent apparaître. Paramètre [join_use_nulls](../../../operations/settings/settings.md#join_use_nulls) définir comment clickhouse remplit ces cellules. - -Si l' `JOIN` les touches sont [Nullable](../../data-types/nullable.md) champs, les lignes où au moins une des clés a la valeur [NULL](../../../sql-reference/syntax.md#null-literal) ne sont pas jointes. - -### Syntaxe {#syntax} - -Les colonnes spécifiées dans `USING` doit avoir les mêmes noms dans les deux sous-requêtes, et les autres colonnes doivent être nommées différemment. Vous pouvez utiliser des alias pour les noms des colonnes dans les sous-requêtes. - -Le `USING` clause spécifie une ou plusieurs colonnes de jointure, qui établit l'égalité de ces colonnes. La liste des colonnes est définie sans crochets. Les conditions de jointure plus complexes ne sont pas prises en charge. - -### Limitations De Syntaxe {#syntax-limitations} - -Pour plusieurs `JOIN` clauses dans un seul `SELECT` requête: - -- Prendre toutes les colonnes via `*` n'est disponible que si les tables sont jointes, pas les sous-requêtes. -- Le `PREWHERE` la clause n'est pas disponible. - -Pour `ON`, `WHERE`, et `GROUP BY` clause: - -- Les expressions arbitraires ne peuvent pas être utilisées dans `ON`, `WHERE`, et `GROUP BY` mais vous pouvez définir une expression dans un `SELECT` clause et ensuite l'utiliser dans ces clauses via un alias. - -### Performance {#performance} - -Lors de l'exécution d'un `JOIN`, il n'y a pas d'optimisation de la commande d'exécution par rapport aux autres stades de la requête. La jointure (une recherche dans la table de droite) est exécutée avant de filtrer `WHERE` et avant l'agrégation. - -Chaque fois qu'une requête est exécutée avec la même `JOIN`, la sous-requête est exécutée à nouveau car le résultat n'est pas mis en cache. Pour éviter cela, utilisez la spéciale [Rejoindre](../../../engines/table-engines/special/join.md) table engine, qui est un tableau préparé pour l'assemblage qui est toujours en RAM. - -Dans certains cas, il est plus efficace d'utiliser [IN](../../operators/in.md) plutôt `JOIN`. 
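-
-As a rough sketch of that trade-off (it reuses the `test.hits` and `test.visits` tables from the example further below; treat the exact schema as an assumption), a filtering subquery in `IN` can replace a `JOIN` whose right side is only used as a filter:
-
-``` sql
--- Keep only left-side rows whose key exists on the right side,
--- without materializing any right-side columns in the result.
-SELECT CounterID, count() AS hits
-FROM test.hits
-WHERE CounterID IN
-(
-    SELECT CounterID
-    FROM test.visits
-)
-GROUP BY CounterID
-```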
- -Si vous avez besoin d'un `JOIN` pour se joindre à des tables de dimension (ce sont des tables relativement petites qui contiennent des propriétés de dimension, telles que des noms pour des campagnes publicitaires), un `JOIN` peut-être pas très pratique en raison du fait que la bonne table est ré-accédée pour chaque requête. Pour de tels cas, il y a un “external dictionaries” la fonctionnalité que vous devez utiliser à la place de `JOIN`. Pour plus d'informations, voir le [Dictionnaires externes](../../dictionaries/external-dictionaries/external-dicts.md) section. - -### Limitations De Mémoire {#memory-limitations} - -Par défaut, ClickHouse utilise [jointure de hachage](https://en.wikipedia.org/wiki/Hash_join) algorithme. ClickHouse prend le `` et crée une table de hachage pour cela dans la RAM. Après un certain seuil de consommation de mémoire, ClickHouse revient à fusionner l'algorithme de jointure. - -Si vous devez restreindre la consommation de mémoire de l'opération join utilisez les paramètres suivants: - -- [max_rows_in_join](../../../operations/settings/query-complexity.md#settings-max_rows_in_join) — Limits number of rows in the hash table. -- [max_bytes_in_join](../../../operations/settings/query-complexity.md#settings-max_bytes_in_join) — Limits size of the hash table. - -Lorsque l'une de ces limites est atteinte, ClickHouse agit comme [join_overflow_mode](../../../operations/settings/query-complexity.md#settings-join_overflow_mode) réglage des instructions. - -## Exemple {#examples} - -Exemple: - -``` sql -SELECT - CounterID, - hits, - visits -FROM -( - SELECT - CounterID, - count() AS hits - FROM test.hits - GROUP BY CounterID -) ANY LEFT JOIN -( - SELECT - CounterID, - sum(Sign) AS visits - FROM test.visits - GROUP BY CounterID -) USING CounterID -ORDER BY hits DESC -LIMIT 10 -``` - -``` text -┌─CounterID─┬───hits─┬─visits─┐ -│ 1143050 │ 523264 │ 13665 │ -│ 731962 │ 475698 │ 102716 │ -│ 722545 │ 337212 │ 108187 │ -│ 722889 │ 252197 │ 10547 │ -│ 2237260 │ 196036 │ 9522 │ -│ 23057320 │ 147211 │ 7689 │ -│ 722818 │ 90109 │ 17847 │ -│ 48221 │ 85379 │ 4652 │ -│ 19762435 │ 77807 │ 7026 │ -│ 722884 │ 77492 │ 11056 │ -└───────────┴────────┴────────┘ -``` diff --git a/docs/fr/sql-reference/statements/select/limit-by.md b/docs/fr/sql-reference/statements/select/limit-by.md deleted file mode 100644 index 4d1bd766ef1..00000000000 --- a/docs/fr/sql-reference/statements/select/limit-by.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd ---- - -# Limite par Clause {#limit-by-clause} - -Une requête avec l' `LIMIT n BY expressions` la clause sélectionne le premier `n` lignes pour chaque valeur distincte de `expressions`. La clé pour `LIMIT BY` peut contenir n'importe quel nombre de [expression](../../syntax.md#syntax-expressions). - -ClickHouse prend en charge les variantes de syntaxe suivantes: - -- `LIMIT [offset_value, ]n BY expressions` -- `LIMIT n OFFSET offset_value BY expressions` - -Pendant le traitement de la requête, ClickHouse sélectionne les données classées par clé de tri. La clé de tri est définie explicitement à l'aide [ORDER BY](order-by.md) clause ou implicitement en tant que propriété du moteur de table. Puis clickhouse s'applique `LIMIT n BY expressions` et renvoie le premier `n` lignes pour chaque combinaison distincte de `expressions`. 
Si `OFFSET` est spécifié, puis pour chaque bloc de données qui appartient à une combinaison particulière de `expressions`, Clickhouse saute `offset_value` nombre de lignes depuis le début du bloc et renvoie un maximum de `n` les lignes en conséquence. Si `offset_value` est plus grand que le nombre de lignes dans le bloc de données, ClickHouse renvoie zéro lignes du bloc. - -!!! note "Note" - `LIMIT BY` n'est pas liée à [LIMIT](limit.md). Ils peuvent tous deux être utilisés dans la même requête. - -## Exemple {#examples} - -Exemple de table: - -``` sql -CREATE TABLE limit_by(id Int, val Int) ENGINE = Memory; -INSERT INTO limit_by VALUES (1, 10), (1, 11), (1, 12), (2, 20), (2, 21); -``` - -Requête: - -``` sql -SELECT * FROM limit_by ORDER BY id, val LIMIT 2 BY id -``` - -``` text -┌─id─┬─val─┐ -│ 1 │ 10 │ -│ 1 │ 11 │ -│ 2 │ 20 │ -│ 2 │ 21 │ -└────┴─────┘ -``` - -``` sql -SELECT * FROM limit_by ORDER BY id, val LIMIT 1, 2 BY id -``` - -``` text -┌─id─┬─val─┐ -│ 1 │ 11 │ -│ 1 │ 12 │ -│ 2 │ 21 │ -└────┴─────┘ -``` - -Le `SELECT * FROM limit_by ORDER BY id, val LIMIT 2 OFFSET 1 BY id` requête renvoie le même résultat. - -La requête suivante renvoie les 5 principaux référents pour chaque `domain, device_type` paire avec un maximum de 100 lignes au total (`LIMIT n BY + LIMIT`). - -``` sql -SELECT - domainWithoutWWW(URL) AS domain, - domainWithoutWWW(REFERRER_URL) AS referrer, - device_type, - count() cnt -FROM hits -GROUP BY domain, referrer, device_type -ORDER BY cnt DESC -LIMIT 5 BY domain, device_type -LIMIT 100 -``` diff --git a/docs/fr/sql-reference/statements/select/limit.md b/docs/fr/sql-reference/statements/select/limit.md deleted file mode 100644 index 69334c32cc9..00000000000 --- a/docs/fr/sql-reference/statements/select/limit.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd ---- - -# Clause LIMIT {#limit-clause} - -`LIMIT m` permet de sélectionner la première `m` lignes du résultat. - -`LIMIT n, m` permet de sélectionner le `m` lignes du résultat après avoir sauté le premier `n` rangée. Le `LIMIT m OFFSET n` la syntaxe est équivalente. - -`n` et `m` doivent être des entiers non négatifs. - -Si il n'y a pas de [ORDER BY](order-by.md) clause qui trie explicitement les résultats, le choix des lignes pour le résultat peut être arbitraire et non déterministe. diff --git a/docs/fr/sql-reference/statements/select/order-by.md b/docs/fr/sql-reference/statements/select/order-by.md deleted file mode 100644 index 2a4ef58d7ad..00000000000 --- a/docs/fr/sql-reference/statements/select/order-by.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd ---- - -# Clause ORDER BY {#select-order-by} - -Le `ORDER BY` clause contient une liste des expressions, qui peuvent être attribuées avec `DESC` (décroissant) ou `ASC` modificateur (ascendant) qui détermine la direction de tri. Si la direction n'est pas spécifié, `ASC` est supposé, donc il est généralement omis. La direction de tri s'applique à une seule expression, pas à la liste entière. Exemple: `ORDER BY Visits DESC, SearchPhrase` - -Les lignes qui ont des valeurs identiques pour la liste des expressions de tri sont sorties dans un ordre arbitraire, qui peut également être non déterministe (différent à chaque fois). -Si la clause ORDER BY est omise, l'ordre des lignes est également indéfini et peut également être non déterministe. 
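-
-For example, a minimal sketch of mixed per-expression directions (the `hits` table and its columns here are assumptions, echoing the clause example above):
-
-``` sql
--- Sort by Visits descending; ties are broken by SearchPhrase ascending.
-SELECT Visits, SearchPhrase
-FROM hits
-ORDER BY Visits DESC, SearchPhrase
-LIMIT 10
-```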
- -## Tri des valeurs spéciales {#sorting-of-special-values} - -Il existe deux approches pour `NaN` et `NULL` ordre de tri: - -- Par défaut ou avec le `NULLS LAST` modificateur: d'abord les valeurs, puis `NaN`, puis `NULL`. -- Avec l' `NULLS FIRST` modificateur: première `NULL`, puis `NaN` puis d'autres valeurs. - -### Exemple {#example} - -Pour la table - -``` text -┌─x─┬────y─┐ -│ 1 │ ᴺᵁᴸᴸ │ -│ 2 │ 2 │ -│ 1 │ nan │ -│ 2 │ 2 │ -│ 3 │ 4 │ -│ 5 │ 6 │ -│ 6 │ nan │ -│ 7 │ ᴺᵁᴸᴸ │ -│ 6 │ 7 │ -│ 8 │ 9 │ -└───┴──────┘ -``` - -Exécuter la requête `SELECT * FROM t_null_nan ORDER BY y NULLS FIRST` obtenir: - -``` text -┌─x─┬────y─┐ -│ 1 │ ᴺᵁᴸᴸ │ -│ 7 │ ᴺᵁᴸᴸ │ -│ 1 │ nan │ -│ 6 │ nan │ -│ 2 │ 2 │ -│ 2 │ 2 │ -│ 3 │ 4 │ -│ 5 │ 6 │ -│ 6 │ 7 │ -│ 8 │ 9 │ -└───┴──────┘ -``` - -Lorsque les nombres à virgule flottante sont triés, les Nan sont séparés des autres valeurs. Quel que soit l'ordre de tri, NaNs viennent à la fin. En d'autres termes, pour le Tri ascendant, ils sont placés comme s'ils étaient plus grands que tous les autres nombres, tandis que pour le Tri descendant, ils sont placés comme s'ils étaient plus petits que les autres. - -## Classement De Soutien {#collation-support} - -Pour le tri par valeurs de chaîne, vous pouvez spécifier le classement (comparaison). Exemple: `ORDER BY SearchPhrase COLLATE 'tr'` - pour le tri par mot-clé dans l'ordre croissant, en utilisant l'alphabet turc, insensible à la casse, en supposant que les chaînes sont encodées en UTF-8. COLLATE peut être spécifié ou non pour chaque expression dans L'ordre par indépendamment. Si ASC ou DESC est spécifié, COLLATE est spécifié après. Lors de L'utilisation de COLLATE, le tri est toujours insensible à la casse. - -Nous recommandons uniquement D'utiliser COLLATE pour le tri final d'un petit nombre de lignes, car le tri avec COLLATE est moins efficace que le tri normal par octets. - -## Détails De Mise En Œuvre {#implementation-details} - -Moins de RAM est utilisé si un assez petit [LIMIT](limit.md) est précisée en plus `ORDER BY`. Sinon, la quantité de mémoire dépensée est proportionnelle au volume de données à trier. Pour le traitement des requêtes distribuées, si [GROUP BY](group-by.md) est omis, le tri est partiellement effectué sur les serveurs distants et les résultats sont fusionnés Sur le serveur demandeur. Cela signifie que pour le tri distribué, le volume de données à trier peut être supérieur à la quantité de mémoire sur un seul serveur. - -S'il N'y a pas assez de RAM, il est possible d'effectuer un tri dans la mémoire externe (création de fichiers temporaires sur un disque). Utilisez le paramètre `max_bytes_before_external_sort` pour ce but. S'il est défini sur 0 (par défaut), le tri externe est désactivé. Si elle est activée, lorsque le volume de données à trier atteint le nombre spécifié d'octets, les données collectées sont triés et déposés dans un fichier temporaire. Une fois toutes les données lues, tous les fichiers triés sont fusionnés et les résultats sont générés. Les fichiers sont écrits dans le `/var/lib/clickhouse/tmp/` dans la configuration (par défaut, mais vous pouvez `tmp_path` paramètre pour modifier ce paramètre). - -L'exécution d'une requête peut utiliser plus de mémoire que `max_bytes_before_external_sort`. Pour cette raison, ce paramètre doit avoir une valeur significativement inférieure à `max_memory_usage`. Par exemple, si votre serveur dispose de 128 Go de RAM et que vous devez exécuter une seule requête, définissez `max_memory_usage` à 100 Go, et `max_bytes_before_external_sort` à 80 Go. 
- -Le tri externe fonctionne beaucoup moins efficacement que le tri dans la RAM. diff --git a/docs/fr/sql-reference/statements/select/prewhere.md b/docs/fr/sql-reference/statements/select/prewhere.md deleted file mode 100644 index 2c825d050f4..00000000000 --- a/docs/fr/sql-reference/statements/select/prewhere.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd ---- - -# Clause PREWHERE {#prewhere-clause} - -Prewhere est une optimisation pour appliquer le filtrage plus efficacement. Il est activé par défaut, même si `PREWHERE` la clause n'est pas explicitement spécifié. Il fonctionne en déplaçant automatiquement une partie de [WHERE](where.md) condition à prewhere étape. Le rôle de `PREWHERE` la clause est seulement pour contrôler cette optimisation si vous pensez que vous savez comment le faire mieux que par défaut. - -Avec l'optimisation prewhere, au début, seules les colonnes nécessaires à l'exécution de l'expression prewhere sont lues. Ensuite, les autres colonnes sont lues qui sont nécessaires pour exécuter le reste de la requête, mais seulement les blocs où l'expression prewhere est “true” au moins pour certaines lignes. S'il y a beaucoup de blocs où prewhere expression est “false” pour toutes les lignes et prewhere a besoin de moins de colonnes que les autres parties de la requête, cela permet souvent de lire beaucoup moins de données à partir du disque pour l'exécution de la requête. - -## Contrôle Manuel De Prewhere {#controlling-prewhere-manually} - -La clause a le même sens que la `WHERE` clause. La différence est dans laquelle les données sont lues à partir de la table. Quand à commander manuellement `PREWHERE` pour les conditions de filtration qui sont utilisées par une minorité des colonnes de la requête, mais qui fournissent une filtration de données forte. Cela réduit le volume de données à lire. - -Une requête peut spécifier simultanément `PREWHERE` et `WHERE`. Dans ce cas, `PREWHERE` précéder `WHERE`. - -Si l' `optimize_move_to_prewhere` le paramètre est défini sur 0, heuristiques pour déplacer automatiquement des parties d'expressions `WHERE` de `PREWHERE` sont désactivés. - -## Limitation {#limitations} - -`PREWHERE` est uniquement pris en charge par les tables `*MergeTree` famille. diff --git a/docs/fr/sql-reference/statements/select/sample.md b/docs/fr/sql-reference/statements/select/sample.md deleted file mode 100644 index b2ddc060a19..00000000000 --- a/docs/fr/sql-reference/statements/select/sample.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd ---- - -# Exemple de Clause {#select-sample-clause} - -Le `SAMPLE` clause permet approchée `SELECT` le traitement de la requête. - -Lorsque l'échantillonnage de données est activé, la requête n'est pas effectuée sur toutes les données, mais uniquement sur une certaine fraction de données (échantillon). Par exemple, si vous avez besoin de calculer des statistiques pour toutes les visites, il suffit d'exécuter la requête sur le 1/10 de la fraction de toutes les visites, puis multiplier le résultat par 10. - -Le traitement approximatif des requêtes peut être utile dans les cas suivants: - -- Lorsque vous avez des exigences de synchronisation strictes (comme \<100ms), mais que vous ne pouvez pas justifier le coût des ressources matérielles supplémentaires pour y répondre. 
-- When your raw data is not accurate, so approximation does not noticeably degrade the quality.
-- When business requirements target approximate results (for cost-effectiveness, or to market exact results to premium users).
-
-!!! note "Note"
-    You can only use sampling with the tables in the [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) family, and only if the sampling expression was specified during table creation (see [MergeTree engine](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table)).
-
-The features of data sampling are listed below:
-
-- Data sampling is a deterministic mechanism. The result of the same `SELECT .. SAMPLE` query is always the same.
-- Sampling works consistently for different tables. For tables with a single sampling key, a sample with the same coefficient always selects the same subset of possible data. For example, a sample of user IDs takes rows with the same subset of all the possible user IDs from different tables. This means that you can use the sample in subqueries in the [IN](../../operators/in.md) clause. Also, you can join samples using the [JOIN](join.md) clause.
-- Sampling allows reading less data from a disk. Note that you must specify the sampling key correctly. For more information, see [Creating a MergeTree Table](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table).
-
-For the `SAMPLE` clause the following syntax is supported:
-
-| SAMPLE Clause Syntax | Description |
-|----------------------|-------------|
-| `SAMPLE k`           | Here `k` is the number from 0 to 1.<br/>The query is executed on `k` fraction of data. For example, `SAMPLE 0.1` runs the query on 10% of data. [Read more](#select-sample-k) |
-| `SAMPLE n`           | Here `n` is a sufficiently large integer.<br/>The query is executed on a sample of at least `n` rows (but not significantly more than this). For example, `SAMPLE 10000000` runs the query on a minimum of 10,000,000 rows. [Read more](#select-sample-n) |
-| `SAMPLE k OFFSET m`  | Here `k` and `m` are the numbers from 0 to 1.<br/>The query is executed on a sample of `k` fraction of the data. The data used for the sample is offset by `m` fraction. [Read more](#select-sample-offset) |
-
-## SAMPLE K {#select-sample-k}
-
-Here `k` is the number from 0 to 1 (both fractional and decimal notations are supported). For example, `SAMPLE 1/2` or `SAMPLE 0.5`.
-
-In a `SAMPLE k` clause, the sample is taken from the `k` fraction of data. The example is shown below:
-
-``` sql
-SELECT
-    Title,
-    count() * 10 AS PageViews
-FROM hits_distributed
-SAMPLE 0.1
-WHERE
-    CounterID = 34
-GROUP BY Title
-ORDER BY PageViews DESC LIMIT 1000
-```
-
-In this example, the query is executed on a sample from 0.1 (10%) of data. Values of aggregate functions are not corrected automatically, so to get an approximate result, the value `count()` is manually multiplied by 10.
-
-## SAMPLE N {#select-sample-n}
-
-Here `n` is a sufficiently large integer. For example, `SAMPLE 10000000`.
-
-In this case, the query is executed on a sample of at least `n` rows (but not significantly more than this). For example, `SAMPLE 10000000` runs the query on a minimum of 10,000,000 rows.
-
-Since the minimum unit for data reading is one granule (its size is set by the `index_granularity` setting), it makes sense to set a sample that is much larger than the size of the granule.
-
-When using the `SAMPLE n` clause, you do not know which relative percent of data was processed. So you do not know the coefficient the aggregate functions should be multiplied by. Use the `_sample_factor` virtual column to get the approximate result.
-
-The `_sample_factor` column contains relative coefficients that are calculated dynamically. This column is created automatically when you [create](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) a table with the specified sampling key. The usage examples of the `_sample_factor` column are shown below.
-
-Let's consider the table `visits`, which contains the statistics about site visits. The first example shows how to calculate the number of page views:
-
-``` sql
-SELECT sum(PageViews * _sample_factor)
-FROM visits
-SAMPLE 10000000
-```
-
-The next example shows how to calculate the total number of visits:
-
-``` sql
-SELECT sum(_sample_factor)
-FROM visits
-SAMPLE 10000000
-```
-
-The example below shows how to calculate the average session duration. Note that you do not need to use the relative coefficient to calculate the average values.
-
-``` sql
-SELECT avg(Duration)
-FROM visits
-SAMPLE 10000000
-```
-
-## SAMPLE K OFFSET M {#select-sample-offset}
-
-Here `k` and `m` are numbers from 0 to 1. Examples are shown below.
-
-**Example 1**
-
-``` sql
-SAMPLE 1/10
-```
-
-In this example, the sample is 1/10th of all data:
-
-`[++------------]`
-
-**Example 2**
-
-``` sql
-SAMPLE 1/10 OFFSET 1/2
-```
-
-Here, a sample of 10% is taken from the second half of the data.
- -`[------++------]` diff --git a/docs/fr/sql-reference/statements/select/union.md b/docs/fr/sql-reference/statements/select/union.md deleted file mode 100644 index 9ae65ebcf72..00000000000 --- a/docs/fr/sql-reference/statements/select/union.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd ---- - -# Clause UNION ALL {#union-clause} - -Vous pouvez utiliser `UNION ALL` à combiner `SELECT` requêtes en étendant leurs résultats. Exemple: - -``` sql -SELECT CounterID, 1 AS table, toInt64(count()) AS c - FROM test.hits - GROUP BY CounterID - -UNION ALL - -SELECT CounterID, 2 AS table, sum(Sign) AS c - FROM test.visits - GROUP BY CounterID - HAVING c > 0 -``` - -Les colonnes de résultat sont appariées par leur index (ordre intérieur `SELECT`). Si les noms de colonne ne correspondent pas, les noms du résultat final sont tirés de la première requête. - -La coulée de Type est effectuée pour les syndicats. Par exemple, si deux requêtes combinées ont le même champ avec non-`Nullable` et `Nullable` types d'un type compatible, la `UNION ALL` a un `Nullable` type de champ. - -Requêtes qui font partie de `UNION ALL` ne peut pas être placée entre parenthèses. [ORDER BY](order-by.md) et [LIMIT](limit.md) sont appliqués à des requêtes séparées, pas au résultat final. Si vous devez appliquer une conversion au résultat final, vous pouvez mettre toutes les requêtes avec `UNION ALL` dans une sous-requête dans la [FROM](from.md) clause. - -## Limitation {#limitations} - -Seulement `UNION ALL` est pris en charge. Régulier `UNION` (`UNION DISTINCT`) n'est pas pris en charge. Si vous avez besoin d' `UNION DISTINCT`, vous pouvez écrire `SELECT DISTINCT` à partir d'une sous-requête contenant `UNION ALL`. - -## Détails De Mise En Œuvre {#implementation-details} - -Requêtes qui font partie de `UNION ALL` peuvent être exécutées simultanément, et leurs résultats peuvent être mélangés ensemble. diff --git a/docs/fr/sql-reference/statements/select/where.md b/docs/fr/sql-reference/statements/select/where.md deleted file mode 100644 index a4d7bc5e87a..00000000000 --- a/docs/fr/sql-reference/statements/select/where.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd ---- - -# Clause where {#select-where} - -`WHERE` clause permet de filtrer les données en provenance de [FROM](from.md) la clause de `SELECT`. - -Si il y a un `WHERE` , il doit contenir une expression avec la `UInt8` type. C'est généralement une expression avec comparaison et opérateurs logiques. Les lignes où cette expression est évaluée à 0 sont exclues des transformations ou des résultats ultérieurs. - -`WHERE` expression est évaluée sur la possibilité d'utiliser des index et l'élagage de partition, si le moteur de table sous-jacent le prend en charge. - -!!! note "Note" - Il y a une optimisation de filtrage appelée [prewhere](prewhere.md). 
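-
-For illustration, a minimal sketch (the `hits` table and the `CounterID` and `EventDate` columns are assumptions borrowed from the examples elsewhere in these pages):
-
-``` sql
--- The condition must evaluate to UInt8; rows where it yields 0 are filtered out.
-SELECT EventDate, count() AS c
-FROM hits
-WHERE CounterID = 34 AND EventDate >= toDate('2019-08-01')
-GROUP BY EventDate
-```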
diff --git a/docs/fr/sql-reference/statements/select/with.md b/docs/fr/sql-reference/statements/select/with.md deleted file mode 100644 index a42aedf460b..00000000000 --- a/docs/fr/sql-reference/statements/select/with.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd ---- - -# AVEC la Clause {#with-clause} - -Cette section prend en charge les Expressions de Table courantes ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)), de sorte que les résultats de `WITH` la clause peut être utilisé à l'intérieur `SELECT` clause. - -## Limitation {#limitations} - -1. Les requêtes récursives ne sont pas prises en charge. -2. Lorsque la sous-requête est utilisée à l'intérieur avec section, son résultat doit être scalaire avec exactement une ligne. -3. Les résultats d'Expression ne sont pas disponibles dans les sous-requêtes. - -## Exemple {#examples} - -**Exemple 1:** Utilisation d'une expression constante comme “variable” - -``` sql -WITH '2019-08-01 15:23:00' as ts_upper_bound -SELECT * -FROM hits -WHERE - EventDate = toDate(ts_upper_bound) AND - EventTime <= ts_upper_bound -``` - -**Exemple 2:** De les expulser, somme(octets) résultat de l'expression de clause SELECT de la liste de colonnes - -``` sql -WITH sum(bytes) as s -SELECT - formatReadableSize(s), - table -FROM system.parts -GROUP BY table -ORDER BY s -``` - -**Exemple 3:** Utilisation des résultats de la sous-requête scalaire - -``` sql -/* this example would return TOP 10 of most huge tables */ -WITH - ( - SELECT sum(bytes) - FROM system.parts - WHERE active - ) AS total_disk_usage -SELECT - (sum(bytes) / total_disk_usage) * 100 AS table_disk_usage, - table -FROM system.parts -GROUP BY table -ORDER BY table_disk_usage DESC -LIMIT 10 -``` - -**Exemple 4:** Réutilisation de l'expression dans la sous-requête - -Comme solution de contournement pour la limitation actuelle de l'utilisation de l'expression dans les sous-requêtes, Vous pouvez la dupliquer. - -``` sql -WITH ['hello'] AS hello -SELECT - hello, - * -FROM -( - WITH ['hello'] AS hello - SELECT hello -) -``` - -``` text -┌─hello─────┬─hello─────┐ -│ ['hello'] │ ['hello'] │ -└───────────┴───────────┘ -``` diff --git a/docs/fr/sql-reference/statements/show.md b/docs/fr/sql-reference/statements/show.md deleted file mode 100644 index 129c6e30d1c..00000000000 --- a/docs/fr/sql-reference/statements/show.md +++ /dev/null @@ -1,169 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 38 -toc_title: SHOW ---- - -# Afficher les requêtes {#show-queries} - -## SHOW CREATE TABLE {#show-create-table} - -``` sql -SHOW CREATE [TEMPORARY] [TABLE|DICTIONARY] [db.]table [INTO OUTFILE filename] [FORMAT format] -``` - -Renvoie un seul `String`-type ‘statement’ column, which contains a single value – the `CREATE` requête utilisée pour créer l'objet spécifié. - -## SHOW DATABASES {#show-databases} - -``` sql -SHOW DATABASES [INTO OUTFILE filename] [FORMAT format] -``` - -Imprime une liste de toutes les bases de données. -Cette requête est identique à `SELECT name FROM system.databases [INTO OUTFILE filename] [FORMAT format]`. 
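-
-For example (a minimal illustration; the exact list depends on the server, but a fresh installation typically shows at least these two):
-
-``` sql
-SHOW DATABASES
-```
-
-``` text
-┌─name────┐
-│ default │
-│ system  │
-└─────────┘
-```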
-
-## SHOW PROCESSLIST {#show-processlist}
-
-``` sql
-SHOW PROCESSLIST [INTO OUTFILE filename] [FORMAT format]
-```
-
-Outputs the content of the [system.processes](../../operations/system-tables.md#system_tables-processes) table, which contains the list of queries being processed at the moment, except for `SHOW PROCESSLIST` queries themselves.
-
-The `SELECT * FROM system.processes` query returns data about all the current queries.
-
-Tip (execute in the console):
-
-``` bash
-$ watch -n1 "clickhouse-client --query='SHOW PROCESSLIST'"
-```
-
-## SHOW TABLES {#show-tables}
-
-Displays a list of tables.
-
-``` sql
-SHOW [TEMPORARY] TABLES [{FROM | IN} <db>] [LIKE '<pattern>' | WHERE expr] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
-```
-
-If the `FROM` clause is not specified, the query returns the list of tables from the current database.
-
-You can get the same results as the `SHOW TABLES` query in the following way:
-
-``` sql
-SELECT name FROM system.tables WHERE database = <db> [AND name LIKE <pattern>] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
-```
-
-**Example**
-
-The following query selects the first two rows from the list of tables in the `system` database, whose names contain `co`.
-
-``` sql
-SHOW TABLES FROM system LIKE '%co%' LIMIT 2
-```
-
-``` text
-┌─name───────────────────────────┐
-│ aggregate_function_combinators │
-│ collations                     │
-└────────────────────────────────┘
-```
-
-## SHOW DICTIONARIES {#show-dictionaries}
-
-Displays a list of [external dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).
-
-``` sql
-SHOW DICTIONARIES [FROM <db>] [LIKE '<pattern>'] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
-```
-
-If the `FROM` clause is not specified, the query returns the list of dictionaries from the current database.
-
-You can get the same results as the `SHOW DICTIONARIES` query in the following way:
-
-``` sql
-SELECT name FROM system.dictionaries WHERE database = <db> [AND name LIKE <pattern>] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
-```
-
-**Example**
-
-The following query selects the first two rows from the list of dictionaries in the `db` database, whose names contain `reg`.
-
-``` sql
-SHOW DICTIONARIES FROM db LIKE '%reg%' LIMIT 2
-```
-
-``` text
-┌─name─────────┐
-│ regions      │
-│ region_names │
-└──────────────┘
-```
-
-## SHOW GRANTS {#show-grants-statement}
-
-Shows the privileges of a user.
-
-### Syntax {#show-grants-syntax}
-
-``` sql
-SHOW GRANTS [FOR user]
-```
-
-If the user is not specified, the query returns the privileges of the current user.
-
-## SHOW CREATE USER {#show-create-user-statement}
-
-Shows the parameters that were used at [user creation](create.md#create-user-statement).
-
-`SHOW CREATE USER` doesn't output user passwords.
- -### Syntaxe {#show-create-user-syntax} - -``` sql -SHOW CREATE USER [name | CURRENT_USER] -``` - -## SHOW CREATE ROLE {#show-create-role-statement} - -Affiche les paramètres qui ont été utilisés [la création de rôle](create.md#create-role-statement) - -### Syntaxe {#show-create-role-syntax} - -``` sql -SHOW CREATE ROLE name -``` - -## SHOW CREATE ROW POLICY {#show-create-row-policy-statement} - -Affiche les paramètres qui ont été utilisés [création de stratégie de ligne](create.md#create-row-policy-statement) - -### Syntaxe {#show-create-row-policy-syntax} - -``` sql -SHOW CREATE [ROW] POLICY name ON [database.]table -``` - -## SHOW CREATE QUOTA {#show-create-quota-statement} - -Affiche les paramètres qui ont été utilisés [quota de création](create.md#create-quota-statement) - -### Syntaxe {#show-create-row-policy-syntax} - -``` sql -SHOW CREATE QUOTA [name | CURRENT] -``` - -## SHOW CREATE SETTINGS PROFILE {#show-create-settings-profile-statement} - -Affiche les paramètres qui ont été utilisés [configuration création de profil](create.md#create-settings-profile-statement) - -### Syntaxe {#show-create-row-policy-syntax} - -``` sql -SHOW CREATE [SETTINGS] PROFILE name -``` - -[Article Original](https://clickhouse.tech/docs/en/query_language/show/) diff --git a/docs/fr/sql-reference/statements/system.md b/docs/fr/sql-reference/statements/system.md deleted file mode 100644 index e8c9ed85cbc..00000000000 --- a/docs/fr/sql-reference/statements/system.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 37 -toc_title: SYSTEM ---- - -# SYSTÈME de Requêtes {#query-language-system} - -- [RELOAD DICTIONARIES](#query_language-system-reload-dictionaries) -- [RELOAD DICTIONARY](#query_language-system-reload-dictionary) -- [DROP DNS CACHE](#query_language-system-drop-dns-cache) -- [DROP MARK CACHE](#query_language-system-drop-mark-cache) -- [FLUSH LOGS](#query_language-system-flush_logs) -- [RELOAD CONFIG](#query_language-system-reload-config) -- [SHUTDOWN](#query_language-system-shutdown) -- [KILL](#query_language-system-kill) -- [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends) -- [FLUSH DISTRIBUTED](#query_language-system-flush-distributed) -- [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) -- [STOP MERGES](#query_language-system-stop-merges) -- [START MERGES](#query_language-system-start-merges) - -## RELOAD DICTIONARIES {#query_language-system-reload-dictionaries} - -Recharge tous les dictionnaires qui ont déjà été chargés avec succès. -Par défaut, les dictionnaires sont chargés paresseusement (voir [dictionaries_lazy_load](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load)), donc au lieu d'être chargés automatiquement au démarrage, ils sont initialisés lors du premier accès via la fonction dictGet ou sélectionnez dans les tables avec ENGINE = Dictionary . Le `SYSTEM RELOAD DICTIONARIES` query recharge ces dictionnaires (chargés). -Retourne toujours `Ok.` quel que soit le résultat de la mise à jour du dictionnaire. - -## Recharger le dictionnaire Dictionary_name {#query_language-system-reload-dictionary} - -Recharge complètement un dictionnaire `dictionary_name`, quel que soit l'état du dictionnaire (LOADED / NOT_LOADED / FAILED). -Retourne toujours `Ok.` quel que soit le résultat de la mise à jour du dictionnaire. 
-L'état du dictionnaire peut être vérifié en interrogeant le `system.dictionaries` table. - -``` sql -SELECT name, status FROM system.dictionaries; -``` - -## DROP DNS CACHE {#query_language-system-drop-dns-cache} - -Réinitialise le cache DNS interne de ClickHouse. Parfois (pour les anciennes versions de ClickHouse), il est nécessaire d'utiliser cette commande lors de la modification de l'infrastructure (modification de l'adresse IP d'un autre serveur ClickHouse ou du serveur utilisé par les dictionnaires). - -Pour une gestion du cache plus pratique (automatique), voir paramètres disable_internal_dns_cache, dns_cache_update_period. - -## DROP MARK CACHE {#query_language-system-drop-mark-cache} - -Réinitialise le cache de marque. Utilisé dans le développement de ClickHouse et des tests de performance. - -## FLUSH LOGS {#query_language-system-flush_logs} - -Flushes buffers of log messages to system tables (e.g. system.query_log). Allows you to not wait 7.5 seconds when debugging. - -## RELOAD CONFIG {#query_language-system-reload-config} - -Recharge la configuration de ClickHouse. Utilisé lorsque la configuration est stockée dans ZooKeeeper. - -## SHUTDOWN {#query_language-system-shutdown} - -Normalement ferme ClickHouse (comme `service clickhouse-server stop` / `kill {$pid_clickhouse-server}`) - -## KILL {#query_language-system-kill} - -Annule le processus de ClickHouse (comme `kill -9 {$ pid_clickhouse-server}`) - -## Gestion Des Tables Distribuées {#query-language-system-distributed} - -ClickHouse peut gérer [distribué](../../engines/table-engines/special/distributed.md) table. Lorsqu'un utilisateur insère des données dans ces tables, ClickHouse crée d'abord une file d'attente des données qui doivent être envoyées aux nœuds de cluster, puis l'envoie de manière asynchrone. Vous pouvez gérer le traitement des files d'attente avec [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [FLUSH DISTRIBUTED](#query_language-system-flush-distributed), et [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) requête. Vous pouvez également insérer de manière synchrone des données distribuées avec `insert_distributed_sync` paramètre. - -### STOP DISTRIBUTED SENDS {#query_language-system-stop-distributed-sends} - -Désactive la distribution de données en arrière-plan lors de l'insertion de données dans des tables distribuées. - -``` sql -SYSTEM STOP DISTRIBUTED SENDS [db.] -``` - -### FLUSH DISTRIBUTED {#query_language-system-flush-distributed} - -Force ClickHouse à envoyer des données aux nœuds de cluster de manière synchrone. Si des nœuds ne sont pas disponibles, ClickHouse lève une exception et arrête l'exécution de la requête. Vous pouvez réessayer la requête jusqu'à ce qu'elle réussisse, ce qui se produira lorsque tous les nœuds seront de nouveau en ligne. - -``` sql -SYSTEM FLUSH DISTRIBUTED [db.] -``` - -### START DISTRIBUTED SENDS {#query_language-system-start-distributed-sends} - -Active la distribution de données en arrière-plan lors de l'insertion de données dans des tables distribuées. - -``` sql -SYSTEM START DISTRIBUTED SENDS [db.] -``` - -### STOP MERGES {#query_language-system-stop-merges} - -Offre la possibilité d'arrêter les fusions d'arrière-plan pour les tables de la famille MergeTree: - -``` sql -SYSTEM STOP MERGES [[db.]merge_tree_family_table_name] -``` - -!!! 
note "Note" - `DETACH / ATTACH` table va commencer les fusions d'arrière-plan pour la table même dans le cas où les fusions ont été arrêtées pour toutes les tables MergeTree auparavant. - -### START MERGES {#query_language-system-start-merges} - -Offre la possibilité de démarrer des fusions en arrière-plan pour les tables de la famille MergeTree: - -``` sql -SYSTEM START MERGES [[db.]merge_tree_family_table_name] -``` - -[Article Original](https://clickhouse.tech/docs/en/query_language/system/) diff --git a/docs/fr/sql-reference/syntax.md b/docs/fr/sql-reference/syntax.md deleted file mode 100644 index b8b24c9bbb5..00000000000 --- a/docs/fr/sql-reference/syntax.md +++ /dev/null @@ -1,187 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 31 -toc_title: Syntaxe ---- - -# Syntaxe {#syntax} - -Il existe deux types d'analyseurs dans le système: L'analyseur SQL complet (un analyseur de descente récursif) et l'analyseur de format de données (un analyseur de flux rapide). -Dans tous les cas à l'exception de la `INSERT` requête, seul L'analyseur SQL complet est utilisé. -Le `INSERT` requête utilise les deux analyseurs: - -``` sql -INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def') -``` - -Le `INSERT INTO t VALUES` fragment est analysé par l'analyseur complet, et les données `(1, 'Hello, world'), (2, 'abc'), (3, 'def')` est analysé par l'analyseur de flux rapide. Vous pouvez également activer l'analyseur complet pour les données à l'aide de la [input_format_values_interpret_expressions](../operations/settings/settings.md#settings-input_format_values_interpret_expressions) paramètre. Lorsque `input_format_values_interpret_expressions = 1`, ClickHouse essaie d'abord d'analyser les valeurs avec l'analyseur de flux rapide. S'il échoue, ClickHouse essaie d'utiliser l'analyseur complet pour les données, en le traitant comme un SQL [expression](#syntax-expressions). - -Les données peuvent avoir n'importe quel format. Lorsqu'une requête est reçue, le serveur calcule pas plus que [max_query_size](../operations/settings/settings.md#settings-max_query_size) octets de la requête en RAM (par défaut, 1 Mo), et le reste est analysé en flux. -Il permet d'éviter les problèmes avec de grandes `INSERT` requête. - -Lors de l'utilisation de la `Values` format dans un `INSERT` de la requête, il peut sembler que les données sont analysées de même que les expressions dans un `SELECT` requête, mais ce n'est pas vrai. Le `Values` le format est beaucoup plus limitée. - -Le reste de cet article couvre l'analyseur complet. Pour plus d'informations sur les analyseurs de format, consultez [Format](../interfaces/formats.md) section. - -## Espace {#spaces} - -Il peut y avoir n'importe quel nombre de symboles d'espace entre les constructions syntaxiques (y compris le début et la fin d'une requête). Les symboles d'espace incluent l'espace, l'onglet, le saut de ligne, Le CR et le flux de formulaire. - -## Commentaire {#comments} - -ClickHouse prend en charge les commentaires de style SQL et de style C. -Les commentaires de style SQL commencent par `--` et continuer jusqu'à la fin de la ligne, un espace après `--` peut être omis. -C-style sont de `/*` de `*/`et peut être multiligne, les espaces ne sont pas requis non plus. - -## Mot {#syntax-keywords} - -Les mots clés sont insensibles à la casse lorsqu'ils correspondent à: - -- La norme SQL. Exemple, `SELECT`, `select` et `SeLeCt` sont toutes valides. 
-- Implémentation dans certains SGBD populaires (MySQL ou Postgres). Exemple, `DateTime` est le même que `datetime`. - -Si le nom du type de données est sensible à la casse peut être vérifié `system.data_type_families` table. - -Contrairement à SQL standard, tous les autres mots clés (y compris les noms de fonctions) sont **sensible à la casse**. - -Mots-clés ne sont pas réservés; ils sont traités comme tels que dans le contexte correspondant. Si vous utilisez [identificateur](#syntax-identifiers) avec le même nom que les mots-clés, placez-les entre guillemets doubles ou backticks. Par exemple, la requête `SELECT "FROM" FROM table_name` est valide si la table `table_name` a colonne avec le nom de `"FROM"`. - -## Identificateur {#syntax-identifiers} - -Les identificateurs sont: - -- Noms de Cluster, de base de données, de table, de partition et de colonne. -- Fonction. -- Types de données. -- [Expression des alias](#syntax-expression_aliases). - -Les identificateurs peuvent être cités ou non cités. Ce dernier est préféré. - -Non identificateurs doivent correspondre à l'expression régulière `^[a-zA-Z_][0-9a-zA-Z_]*$` et ne peut pas être égale à [mot](#syntax-keywords). Exemple: `x, _1, X_y__Z123_.` - -Si vous souhaitez utiliser les identifiants de la même manière que les mots-clés ou si vous souhaitez utiliser d'autres symboles dans les identifiants, citez-le en utilisant des guillemets doubles ou des backticks, par exemple, `"id"`, `` `id` ``. - -## Littéral {#literals} - -Il y a numérique, chaîne de caractères, composé, et `NULL` littéral. - -### Numérique {#numeric} - -Littéral numérique tente d'être analysé: - -- Tout d'abord, comme un nombre signé 64 bits, en utilisant le [strtoull](https://en.cppreference.com/w/cpp/string/byte/strtoul) fonction. -- En cas d'échec, en tant que nombre non signé 64 bits, [strtoll](https://en.cppreference.com/w/cpp/string/byte/strtol) fonction. -- En cas d'échec, en tant que nombre à virgule flottante [strtod](https://en.cppreference.com/w/cpp/string/byte/strtof) fonction. -- Sinon, elle renvoie une erreur. - -La valeur littérale a le plus petit type dans lequel la valeur correspond. -Par exemple, 1 est analysé comme `UInt8`, mais 256 est analysé comme `UInt16`. Pour plus d'informations, voir [Types de données](../sql-reference/data-types/index.md). - -Exemple: `1`, `18446744073709551615`, `0xDEADBEEF`, `01`, `0.1`, `1e100`, `-1e-100`, `inf`, `nan`. - -### Chaîne {#syntax-string-literal} - -Seuls les littéraux de chaîne entre guillemets simples sont pris en charge. Le clos de caractères barre oblique inverse échappé. Les séquences d'échappement suivantes ont une valeur spéciale correspondante: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`. Dans tous les autres cas, des séquences d'échappement au format `\c`, où `c` est un caractère, sont convertis à `c`. Cela signifie que vous pouvez utiliser les séquences `\'`et`\\`. La valeur aurez l' [Chaîne](../sql-reference/data-types/string.md) type. - -Dans les littéraux de chaîne, vous devez vous échapper d'au moins `'` et `\`. Les guillemets simples peuvent être échappés avec le guillemet simple, littéraux `'It\'s'` et `'It''s'` sont égaux. - -### Composé {#compound} - -Les tableaux sont construits avec des crochets `[1, 2, 3]`. Nuples sont construits avec des supports ronds `(1, 'Hello, world!', 2)`. -Techniquement, ce ne sont pas des littéraux, mais des expressions avec l'opérateur de création de tableau et l'opérateur de création de tuple, respectivement. 
-Un tableau doit être composé d'au moins un élément, et un tuple doit avoir au moins deux éléments. -Il y a un cas distinct lorsque les tuples apparaissent dans le `IN` clause de a `SELECT` requête. Les résultats de la requête peuvent inclure des tuples, mais les tuples ne peuvent pas être enregistrés dans une base de données (à l'exception des tables avec [Mémoire](../engines/table-engines/special/memory.md) moteur). - -### NULL {#null-literal} - -Indique que la valeur est manquante. - -Afin de stocker `NULL` dans un champ de table, il doit être de la [Nullable](../sql-reference/data-types/nullable.md) type. - -Selon le format de données (entrée ou sortie), `NULL` peut avoir une représentation différente. Pour plus d'informations, consultez la documentation de [formats de données](../interfaces/formats.md#formats). - -Il y a beaucoup de nuances au traitement `NULL`. Par exemple, si au moins l'un des arguments d'une opération de comparaison est `NULL` le résultat de cette opération est également `NULL`. Il en va de même pour la multiplication, l'addition et d'autres opérations. Pour plus d'informations, lisez la documentation pour chaque opération. - -Dans les requêtes, vous pouvez vérifier `NULL` à l'aide de la [IS NULL](operators/index.md#operator-is-null) et [IS NOT NULL](operators/index.md) opérateurs et les fonctions connexes `isNull` et `isNotNull`. - -## Fonction {#functions} - -Les appels de fonction sont écrits comme un identifiant avec une liste d'arguments (éventuellement vide) entre parenthèses. Contrairement à SQL standard, les crochets sont requis, même pour une liste d'arguments vide. Exemple: `now()`. -Il existe des fonctions régulières et agrégées (voir la section “Aggregate functions”). Certaines fonctions d'agrégat peut contenir deux listes d'arguments entre parenthèses. Exemple: `quantile (0.9) (x)`. Ces fonctions d'agrégation sont appelés “parametric” fonctions, et les arguments dans la première liste sont appelés “parameters”. La syntaxe des fonctions d'agrégation sans paramètres est la même que pour les fonctions régulières. - -## Opérateur {#operators} - -Les opérateurs sont convertis en leurs fonctions correspondantes lors de l'analyse des requêtes, en tenant compte de leur priorité et de leur associativité. -Par exemple, l'expression `1 + 2 * 3 + 4` est transformé à `plus(plus(1, multiply(2, 3)), 4)`. - -## Types de données et moteurs de Table de base de données {#data_types-and-database-table-engines} - -Types de données et moteurs de table dans `CREATE` les requêtes sont écrites de la même manière que les identifiants ou les fonctions. En d'autres termes, ils peuvent ou ne peuvent pas contenir une liste d'arguments entre parenthèses. Pour plus d'informations, voir les sections “Data types,” “Table engines,” et “CREATE”. - -## Expression Des Alias {#syntax-expression_aliases} - -Un alias est un nom défini par l'utilisateur pour l'expression dans une requête. - -``` sql -expr AS alias -``` - -- `AS` — The keyword for defining aliases. You can define the alias for a table name or a column name in a `SELECT` clause sans utiliser le `AS` mot. - - For example, `SELECT table_name_alias.column_name FROM table_name table_name_alias`. - - In the [CAST](sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) function, the `AS` keyword has another meaning. See the description of the function. - -- `expr` — Any expression supported by ClickHouse. - - For example, `SELECT column_name * 2 AS double FROM some_table`. - -- `alias` — Name for `expr`. 
Les alias doivent être conformes à la [identificateur](#syntax-identifiers) syntaxe. - - For example, `SELECT "table t".column_name FROM table_name AS "table t"`. - -### Notes sur l'Utilisation de la {#notes-on-usage} - -Les alias sont globaux pour une requête ou d'une sous-requête, vous pouvez définir un alias dans n'importe quelle partie d'une requête de toute expression. Exemple, `SELECT (1 AS n) + 2, n`. - -Les alias ne sont pas visibles dans les sous-requêtes et entre les sous-requêtes. Par exemple, lors de l'exécution de la requête `SELECT (SELECT sum(b.a) + num FROM b) - a.a AS num FROM a` Clickhouse génère l'exception `Unknown identifier: num`. - -Si un alias est défini pour les colonnes de `SELECT` la clause d'une sous-requête, ces colonnes sont visibles dans la requête externe. Exemple, `SELECT n + m FROM (SELECT 1 AS n, 2 AS m)`. - -Soyez prudent avec les Alias qui sont les mêmes que les noms de colonnes ou de tables. Considérons l'exemple suivant: - -``` sql -CREATE TABLE t -( - a Int, - b Int -) -ENGINE = TinyLog() -``` - -``` sql -SELECT - argMax(a, b), - sum(b) AS b -FROM t -``` - -``` text -Received exception from server (version 18.14.17): -Code: 184. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: Aggregate function sum(b) is found inside another aggregate function in query. -``` - -Dans cet exemple, nous avons déclaré table `t` avec la colonne `b`. Ensuite, lors de la sélection des données, nous avons défini le `sum(b) AS b` alias. Comme les alias sont globaux, ClickHouse a substitué le littéral `b` dans l'expression `argMax(a, b)` avec l'expression `sum(b)`. Cette substitution a provoqué l'exception. - -## Astérisque {#asterisk} - -Dans un `SELECT` requête, un astérisque peut remplacer l'expression. Pour plus d'informations, consultez la section “SELECT”. - -## Expression {#syntax-expressions} - -Une expression est une fonction, un identifiant, un littéral, une application d'un opérateur, une expression entre parenthèses, une sous-requête ou un astérisque. Il peut également contenir un alias. -Une liste des expressions est une ou plusieurs expressions séparées par des virgules. -Les fonctions et les opérateurs, à leur tour, peuvent avoir des expressions comme arguments. - -[Article Original](https://clickhouse.tech/docs/en/sql_reference/syntax/) diff --git a/docs/fr/sql-reference/table-functions/file.md b/docs/fr/sql-reference/table-functions/file.md deleted file mode 100644 index a58821d021d..00000000000 --- a/docs/fr/sql-reference/table-functions/file.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 37 -toc_title: fichier ---- - -# fichier {#file} - -Crée un tableau à partir d'un fichier. Cette fonction de table est similaire à [URL](url.md) et [hdfs](hdfs.md) ceux. - -``` sql -file(path, format, structure) -``` - -**Les paramètres d'entrée** - -- `path` — The relative path to the file from [user_files_path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). Chemin d'accès à la prise en charge des fichiers suivant les globs en mode Lecture seule: `*`, `?`, `{abc,def}` et `{N..M}` où `N`, `M` — numbers, \``'abc', 'def'` — strings. -- `format` — The [format](../../interfaces/formats.md#formats) de le fichier. -- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`. 
- -**Valeur renvoyée** - -Une table avec la structure spécifiée pour lire ou écrire des données dans le fichier spécifié. - -**Exemple** - -Paramètre `user_files_path` et le contenu du fichier `test.csv`: - -``` bash -$ grep user_files_path /etc/clickhouse-server/config.xml - /var/lib/clickhouse/user_files/ - -$ cat /var/lib/clickhouse/user_files/test.csv - 1,2,3 - 3,2,1 - 78,43,45 -``` - -Table de`test.csv` et la sélection des deux premières lignes de ce: - -``` sql -SELECT * -FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32') -LIMIT 2 -``` - -``` text -┌─column1─┬─column2─┬─column3─┐ -│ 1 │ 2 │ 3 │ -│ 3 │ 2 │ 1 │ -└─────────┴─────────┴─────────┘ -``` - -``` sql --- getting the first 10 lines of a table that contains 3 columns of UInt32 type from a CSV file -SELECT * FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32') LIMIT 10 -``` - -**Globs dans le chemin** - -Plusieurs composants de chemin peuvent avoir des globs. Pour être traité, le fichier doit exister et correspondre à l'ensemble du modèle de chemin (pas seulement le suffixe ou le préfixe). - -- `*` — Substitutes any number of any characters except `/` y compris la chaîne vide. -- `?` — Substitutes any single character. -- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. -- `{N..M}` — Substitutes any number in range from N to M including both borders. - -Les Constructions avec `{}` sont similaires à l' [fonction de table à distance](../../sql-reference/table-functions/remote.md)). - -**Exemple** - -1. Supposons que nous ayons plusieurs fichiers avec les chemins relatifs suivants: - -- ‘some_dir/some_file_1’ -- ‘some_dir/some_file_2’ -- ‘some_dir/some_file_3’ -- ‘another_dir/some_file_1’ -- ‘another_dir/some_file_2’ -- ‘another_dir/some_file_3’ - -1. Interroger la quantité de lignes dans ces fichiers: - - - -``` sql -SELECT count(*) -FROM file('{some,another}_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32') -``` - -1. Requête de la quantité de lignes dans tous les fichiers de ces deux répertoires: - - - -``` sql -SELECT count(*) -FROM file('{some,another}_dir/*', 'TSV', 'name String, value UInt32') -``` - -!!! warning "Avertissement" - Si votre liste de fichiers contient des plages de nombres avec des zéros en tête, utilisez la construction avec des accolades pour chaque chiffre séparément ou utilisez `?`. - -**Exemple** - -Interroger les données des fichiers nommés `file000`, `file001`, … , `file999`: - -``` sql -SELECT count(*) -FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32') -``` - -## Les Colonnes Virtuelles {#virtual-columns} - -- `_path` — Path to the file. -- `_file` — Name of the file. - -**Voir Aussi** - -- [Les colonnes virtuelles](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns) - -[Article Original](https://clickhouse.tech/docs/en/query_language/table_functions/file/) diff --git a/docs/fr/sql-reference/table-functions/generate.md b/docs/fr/sql-reference/table-functions/generate.md deleted file mode 100644 index 1f7eeddd0e1..00000000000 --- a/docs/fr/sql-reference/table-functions/generate.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 47 -toc_title: generateRandom ---- - -# generateRandom {#generaterandom} - -Génère des données aléatoires avec un schéma donné. 
-Permet de remplir des tables de test avec des données. -Prend en charge tous les types de données qui peuvent être stockés dans la table sauf `LowCardinality` et `AggregateFunction`. - -``` sql -generateRandom('name TypeName[, name TypeName]...', [, 'random_seed'[, 'max_string_length'[, 'max_array_length']]]); -``` - -**Paramètre** - -- `name` — Name of corresponding column. -- `TypeName` — Type of corresponding column. -- `max_array_length` — Maximum array length for all generated arrays. Defaults to `10`. -- `max_string_length` — Maximum string length for all generated strings. Defaults to `10`. -- `random_seed` — Specify random seed manually to produce stable results. If NULL — seed is randomly generated. - -**Valeur Renvoyée** - -Un objet de table avec le schéma demandé. - -## Exemple D'Utilisation {#usage-example} - -``` sql -SELECT * FROM generateRandom('a Array(Int8), d Decimal32(4), c Tuple(DateTime64(3), UUID)', 1, 10, 2) LIMIT 3; -``` - -``` text -┌─a────────┬────────────d─┬─c──────────────────────────────────────────────────────────────────┐ -│ [77] │ -124167.6723 │ ('2061-04-17 21:59:44.573','3f72f405-ec3e-13c8-44ca-66ef335f7835') │ -│ [32,110] │ -141397.7312 │ ('1979-02-09 03:43:48.526','982486d1-5a5d-a308-e525-7bd8b80ffa73') │ -│ [68] │ -67417.0770 │ ('2080-03-12 14:17:31.269','110425e5-413f-10a6-05ba-fa6b3e929f15') │ -└──────────┴──────────────┴────────────────────────────────────────────────────────────────────┘ -``` - -[Article Original](https://clickhouse.tech/docs/en/query_language/table_functions/generate/) diff --git a/docs/fr/sql-reference/table-functions/hdfs.md b/docs/fr/sql-reference/table-functions/hdfs.md deleted file mode 100644 index 51b742d8018..00000000000 --- a/docs/fr/sql-reference/table-functions/hdfs.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 45 -toc_title: hdfs ---- - -# hdfs {#hdfs} - -Crée une table à partir de fichiers dans HDFS. Cette fonction de table est similaire à [URL](url.md) et [fichier](file.md) ceux. - -``` sql -hdfs(URI, format, structure) -``` - -**Les paramètres d'entrée** - -- `URI` — The relative URI to the file in HDFS. Path to file support following globs in readonly mode: `*`, `?`, `{abc,def}` et `{N..M}` où `N`, `M` — numbers, \``'abc', 'def'` — strings. -- `format` — The [format](../../interfaces/formats.md#formats) de le fichier. -- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`. - -**Valeur renvoyée** - -Une table avec la structure spécifiée pour lire ou écrire des données dans le fichier spécifié. - -**Exemple** - -Table de `hdfs://hdfs1:9000/test` et la sélection des deux premières lignes de ce: - -``` sql -SELECT * -FROM hdfs('hdfs://hdfs1:9000/test', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32') -LIMIT 2 -``` - -``` text -┌─column1─┬─column2─┬─column3─┐ -│ 1 │ 2 │ 3 │ -│ 3 │ 2 │ 1 │ -└─────────┴─────────┴─────────┘ -``` - -**Globs dans le chemin** - -Plusieurs composants de chemin peuvent avoir des globs. Pour être traité, le fichier doit exister et correspondre à l'ensemble du modèle de chemin (pas seulement le suffixe ou le préfixe). - -- `*` — Substitutes any number of any characters except `/` y compris la chaîne vide. -- `?` — Substitutes any single character. -- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. 
-- `{N..M}` — Substitutes any number in range from N to M including both borders. - -Les Constructions avec `{}` sont similaires à l' [fonction de table à distance](../../sql-reference/table-functions/remote.md)). - -**Exemple** - -1. Supposons que nous ayons plusieurs fichiers avec les URI suivants sur HDFS: - -- ‘hdfs://hdfs1:9000/some_dir/some_file_1’ -- ‘hdfs://hdfs1:9000/some_dir/some_file_2’ -- ‘hdfs://hdfs1:9000/some_dir/some_file_3’ -- ‘hdfs://hdfs1:9000/another_dir/some_file_1’ -- ‘hdfs://hdfs1:9000/another_dir/some_file_2’ -- ‘hdfs://hdfs1:9000/another_dir/some_file_3’ - -1. Interroger la quantité de lignes dans ces fichiers: - - - -``` sql -SELECT count(*) -FROM hdfs('hdfs://hdfs1:9000/{some,another}_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32') -``` - -1. Requête de la quantité de lignes dans tous les fichiers de ces deux répertoires: - - - -``` sql -SELECT count(*) -FROM hdfs('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV', 'name String, value UInt32') -``` - -!!! warning "Avertissement" - Si votre liste de fichiers contient des plages de nombres avec des zéros en tête, utilisez la construction avec des accolades pour chaque chiffre séparément ou utilisez `?`. - -**Exemple** - -Interroger les données des fichiers nommés `file000`, `file001`, … , `file999`: - -``` sql -SELECT count(*) -FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32') -``` - -## Les Colonnes Virtuelles {#virtual-columns} - -- `_path` — Path to the file. -- `_file` — Name of the file. - -**Voir Aussi** - -- [Les colonnes virtuelles](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns) - -[Article Original](https://clickhouse.tech/docs/en/query_language/table_functions/hdfs/) diff --git a/docs/fr/sql-reference/table-functions/index.md b/docs/fr/sql-reference/table-functions/index.md deleted file mode 100644 index 89a8200e385..00000000000 --- a/docs/fr/sql-reference/table-functions/index.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_folder_title: Les Fonctions De Table -toc_priority: 34 -toc_title: Introduction ---- - -# Les Fonctions De Table {#table-functions} - -Les fonctions de Table sont des méthodes pour construire des tables. - -Vous pouvez utiliser les fonctions de table dans: - -- [FROM](../statements/select/from.md) la clause de la `SELECT` requête. - - The method for creating a temporary table that is available only in the current query. The table is deleted when the query finishes. - -- [Créer une TABLE en tant que \< table_function ()\>](../statements/create.md#create-table-query) requête. - - It's one of the methods of creating a table. - -!!! warning "Avertissement" - Vous ne pouvez pas utiliser les fonctions de table si [allow_ddl](../../operations/settings/permissions-for-queries.md#settings_allow_ddl) paramètre est désactivé. - -| Fonction | Description | -|-----------------------|-------------------------------------------------------------------------------------------------------------------------------------| -| [fichier](file.md) | Crée un [Fichier](../../engines/table-engines/special/file.md)-moteur de table. | -| [fusionner](merge.md) | Crée un [Fusionner](../../engines/table-engines/special/merge.md)-moteur de table. | -| [nombre](numbers.md) | Crée une table avec une seule colonne remplie de nombres entiers. 
| -| [distant](remote.md) | Vous permet d'accéder à des serveurs distants sans [Distribué](../../engines/table-engines/special/distributed.md)-moteur de table. | -| [URL](url.md) | Crée un [URL](../../engines/table-engines/special/url.md)-moteur de table. | -| [mysql](mysql.md) | Crée un [MySQL](../../engines/table-engines/integrations/mysql.md)-moteur de table. | -| [jdbc](jdbc.md) | Crée un [JDBC](../../engines/table-engines/integrations/jdbc.md)-moteur de table. | -| [ODBC](odbc.md) | Crée un [ODBC](../../engines/table-engines/integrations/odbc.md)-moteur de table. | -| [hdfs](hdfs.md) | Crée un [HDFS](../../engines/table-engines/integrations/hdfs.md)-moteur de table. | - -[Article Original](https://clickhouse.tech/docs/en/query_language/table_functions/) diff --git a/docs/fr/sql-reference/table-functions/input.md b/docs/fr/sql-reference/table-functions/input.md deleted file mode 100644 index 21e0eacb5c1..00000000000 --- a/docs/fr/sql-reference/table-functions/input.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 46 -toc_title: "entr\xE9e" ---- - -# entrée {#input} - -`input(structure)` - fonction de table qui permet effectivement convertir et insérer des données envoyées à la -serveur avec une structure donnée à la table avec une autre structure. - -`structure` - structure de données envoyées au serveur dans le format suivant `'column1_name column1_type, column2_name column2_type, ...'`. -Exemple, `'id UInt32, name String'`. - -Cette fonction peut être utilisée uniquement dans `INSERT SELECT` requête et une seule fois mais se comporte autrement comme une fonction de table ordinaire -(par exemple, il peut être utilisé dans la sous-requête, etc.). - -Les données peuvent être envoyées de quelque manière que ce soit comme pour ordinaire `INSERT` requête et passé dans tout disponible [format](../../interfaces/formats.md#formats) -qui doit être spécifié à la fin de la requête (contrairement à l'ordinaire `INSERT SELECT`). - -La caractéristique principale de cette fonction est que lorsque le serveur reçoit des données du client il les convertit simultanément -selon la liste des expressions dans le `SELECT` clause et insère dans la table cible. Table temporaire -avec toutes les données transférées n'est pas créé. - -**Exemple** - -- Laissez le `test` le tableau a la structure suivante `(a String, b String)` - et les données `data.csv` a une structure différente `(col1 String, col2 Date, col3 Int32)`. 
Requête pour insérer - les données de l' `data.csv` dans le `test` table avec conversion simultanée ressemble à ceci: - - - -``` bash -$ cat data.csv | clickhouse-client --query="INSERT INTO test SELECT lower(col1), col3 * col3 FROM input('col1 String, col2 Date, col3 Int32') FORMAT CSV"; -``` - -- Si `data.csv` contient les données de la même structure `test_structure` comme la table `test` puis ces deux requêtes sont égales: - - - -``` bash -$ cat data.csv | clickhouse-client --query="INSERT INTO test FORMAT CSV" -$ cat data.csv | clickhouse-client --query="INSERT INTO test SELECT * FROM input('test_structure') FORMAT CSV" -``` - -[Article Original](https://clickhouse.tech/docs/en/query_language/table_functions/input/) diff --git a/docs/fr/sql-reference/table-functions/jdbc.md b/docs/fr/sql-reference/table-functions/jdbc.md deleted file mode 100644 index 76dea0e0930..00000000000 --- a/docs/fr/sql-reference/table-functions/jdbc.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 43 -toc_title: jdbc ---- - -# jdbc {#table-function-jdbc} - -`jdbc(jdbc_connection_uri, schema, table)` - retourne la table qui est connectée via le pilote JDBC. - -Ce tableau fonction nécessite séparé `clickhouse-jdbc-bridge` programme en cours d'exécution. -Il prend en charge les types Nullable (basé sur DDL de la table distante qui est interrogée). - -**Exemple** - -``` sql -SELECT * FROM jdbc('jdbc:mysql://localhost:3306/?user=root&password=root', 'schema', 'table') -``` - -``` sql -SELECT * FROM jdbc('mysql://localhost:3306/?user=root&password=root', 'schema', 'table') -``` - -``` sql -SELECT * FROM jdbc('datasource://mysql-local', 'schema', 'table') -``` - -[Article Original](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/) diff --git a/docs/fr/sql-reference/table-functions/merge.md b/docs/fr/sql-reference/table-functions/merge.md deleted file mode 100644 index 1ec264b06bd..00000000000 --- a/docs/fr/sql-reference/table-functions/merge.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 38 -toc_title: fusionner ---- - -# fusionner {#merge} - -`merge(db_name, 'tables_regexp')` – Creates a temporary Merge table. For more information, see the section “Table engines, Merge”. - -La structure de la table est tirée de la première table rencontrée qui correspond à l'expression régulière. - -[Article Original](https://clickhouse.tech/docs/en/query_language/table_functions/merge/) diff --git a/docs/fr/sql-reference/table-functions/mysql.md b/docs/fr/sql-reference/table-functions/mysql.md deleted file mode 100644 index 295456914f0..00000000000 --- a/docs/fr/sql-reference/table-functions/mysql.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 42 -toc_title: mysql ---- - -# mysql {#mysql} - -Permettre `SELECT` requêtes à effectuer sur des données stockées sur un serveur MySQL distant. - -``` sql -mysql('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']); -``` - -**Paramètre** - -- `host:port` — MySQL server address. - -- `database` — Remote database name. - -- `table` — Remote table name. - -- `user` — MySQL user. - -- `password` — User password. - -- `replace_query` — Flag that converts `INSERT INTO` les requêtes de `REPLACE INTO`. Si `replace_query=1` la requête est remplacé. 
- -- `on_duplicate_clause` — The `ON DUPLICATE KEY on_duplicate_clause` expression qui est ajoutée à la `INSERT` requête. - - Example: `INSERT INTO t (c1,c2) VALUES ('a', 2) ON DUPLICATE KEY UPDATE c2 = c2 + 1`, where `on_duplicate_clause` is `UPDATE c2 = c2 + 1`. See the MySQL documentation to find which `on_duplicate_clause` you can use with the `ON DUPLICATE KEY` clause. - - To specify `on_duplicate_clause` you need to pass `0` to the `replace_query` parameter. If you simultaneously pass `replace_query = 1` and `on_duplicate_clause`, ClickHouse generates an exception. - -Simple `WHERE` des clauses telles que `=, !=, >, >=, <, <=` sont actuellement exécutés sur le serveur MySQL. - -Le reste des conditions et le `LIMIT` les contraintes d'échantillonnage sont exécutées dans ClickHouse uniquement après la fin de la requête à MySQL. - -**Valeur Renvoyée** - -Un objet table avec les mêmes colonnes que la table MySQL d'origine. - -## Exemple D'Utilisation {#usage-example} - -Table dans MySQL: - -``` text -mysql> CREATE TABLE `test`.`test` ( - -> `int_id` INT NOT NULL AUTO_INCREMENT, - -> `int_nullable` INT NULL DEFAULT NULL, - -> `float` FLOAT NOT NULL, - -> `float_nullable` FLOAT NULL DEFAULT NULL, - -> PRIMARY KEY (`int_id`)); -Query OK, 0 rows affected (0,09 sec) - -mysql> insert into test (`int_id`, `float`) VALUES (1,2); -Query OK, 1 row affected (0,00 sec) - -mysql> select * from test; -+------+----------+-----+----------+ -| int_id | int_nullable | float | float_nullable | -+------+----------+-----+----------+ -| 1 | NULL | 2 | NULL | -+------+----------+-----+----------+ -1 row in set (0,00 sec) -``` - -Sélection des données de ClickHouse: - -``` sql -SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123') -``` - -``` text -┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐ -│ 1 │ ᴺᵁᴸᴸ │ 2 │ ᴺᵁᴸᴸ │ -└────────┴──────────────┴───────┴────────────────┘ -``` - -## Voir Aussi {#see-also} - -- [Le ‘MySQL’ tableau moteur](../../engines/table-engines/integrations/mysql.md) -- [Utilisation de MySQL comme source de dictionnaire externe](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-mysql) - -[Article Original](https://clickhouse.tech/docs/en/query_language/table_functions/mysql/) diff --git a/docs/fr/sql-reference/table-functions/numbers.md b/docs/fr/sql-reference/table-functions/numbers.md deleted file mode 100644 index 50a5ad61002..00000000000 --- a/docs/fr/sql-reference/table-functions/numbers.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 39 -toc_title: nombre ---- - -# nombre {#numbers} - -`numbers(N)` – Returns a table with the single ‘number’ colonne (UInt64) qui contient des entiers de 0 à n-1. -`numbers(N, M)` - Retourne un tableau avec le seul ‘number’ colonne (UInt64) qui contient des entiers de N À (N + M-1). - -Similaire à la `system.numbers` table, il peut être utilisé pour tester et générer des valeurs successives, `numbers(N, M)` plus efficace que `system.numbers`. 
- -Les requêtes suivantes sont équivalentes: - -``` sql -SELECT * FROM numbers(10); -SELECT * FROM numbers(0, 10); -SELECT * FROM system.numbers LIMIT 10; -``` - -Exemple: - -``` sql --- Generate a sequence of dates from 2010-01-01 to 2010-12-31 -select toDate('2010-01-01') + number as d FROM numbers(365); -``` - -[Article Original](https://clickhouse.tech/docs/en/query_language/table_functions/numbers/) diff --git a/docs/fr/sql-reference/table-functions/odbc.md b/docs/fr/sql-reference/table-functions/odbc.md deleted file mode 100644 index aae636a5eb2..00000000000 --- a/docs/fr/sql-reference/table-functions/odbc.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 44 -toc_title: ODBC ---- - -# ODBC {#table-functions-odbc} - -Renvoie la table connectée via [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity). - -``` sql -odbc(connection_settings, external_database, external_table) -``` - -Paramètre: - -- `connection_settings` — Name of the section with connection settings in the `odbc.ini` fichier. -- `external_database` — Name of a database in an external DBMS. -- `external_table` — Name of a table in the `external_database`. - -Pour implémenter en toute sécurité les connexions ODBC, ClickHouse utilise un programme distinct `clickhouse-odbc-bridge`. Si le pilote ODBC est chargé directement depuis `clickhouse-server`, les problèmes de pilote peuvent planter le serveur ClickHouse. Clickhouse démarre automatiquement `clickhouse-odbc-bridge` lorsque cela est nécessaire. Le programme ODBC bridge est installé à partir du même package que `clickhouse-server`. - -Les champs avec l' `NULL` les valeurs de la table externe sont converties en valeurs par défaut pour le type de données de base. Par exemple, si un champ de table MySQL distant a `INT NULL` type il est converti en 0 (la valeur par défaut pour ClickHouse `Int32` type de données). - -## Exemple D'Utilisation {#usage-example} - -**Obtenir des données de L'installation MySQL locale via ODBC** - -Cet exemple est vérifié pour Ubuntu Linux 18.04 et MySQL server 5.7. - -Assurez-vous que unixODBC et MySQL Connector sont installés. - -Par défaut (si installé à partir de paquets), ClickHouse démarre en tant qu'utilisateur `clickhouse`. Ainsi, vous devez créer et configurer cet utilisateur dans le serveur MySQL. - -``` bash -$ sudo mysql -``` - -``` sql -mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse'; -mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION; -``` - -Puis configurez la connexion dans `/etc/odbc.ini`. - -``` bash -$ cat /etc/odbc.ini -[mysqlconn] -DRIVER = /usr/local/lib/libmyodbc5w.so -SERVER = 127.0.0.1 -PORT = 3306 -DATABASE = test -USERNAME = clickhouse -PASSWORD = clickhouse -``` - -Vous pouvez vérifier la connexion en utilisant le `isql` utilitaire de l'installation unixODBC. - -``` bash -$ isql -v mysqlconn -+-------------------------+ -| Connected! | -| | -... 
-``` - -Table dans MySQL: - -``` text -mysql> CREATE TABLE `test`.`test` ( - -> `int_id` INT NOT NULL AUTO_INCREMENT, - -> `int_nullable` INT NULL DEFAULT NULL, - -> `float` FLOAT NOT NULL, - -> `float_nullable` FLOAT NULL DEFAULT NULL, - -> PRIMARY KEY (`int_id`)); -Query OK, 0 rows affected (0,09 sec) - -mysql> insert into test (`int_id`, `float`) VALUES (1,2); -Query OK, 1 row affected (0,00 sec) - -mysql> select * from test; -+------+----------+-----+----------+ -| int_id | int_nullable | float | float_nullable | -+------+----------+-----+----------+ -| 1 | NULL | 2 | NULL | -+------+----------+-----+----------+ -1 row in set (0,00 sec) -``` - -Récupération des données de la table MySQL dans ClickHouse: - -``` sql -SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test') -``` - -``` text -┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐ -│ 1 │ 0 │ 2 │ 0 │ -└────────┴──────────────┴───────┴────────────────┘ -``` - -## Voir Aussi {#see-also} - -- [Dictionnaires externes ODBC](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-odbc) -- [Moteur de table ODBC](../../engines/table-engines/integrations/odbc.md). - -[Article Original](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/) diff --git a/docs/fr/sql-reference/table-functions/remote.md b/docs/fr/sql-reference/table-functions/remote.md deleted file mode 100644 index 380a9986116..00000000000 --- a/docs/fr/sql-reference/table-functions/remote.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 40 -toc_title: distant ---- - -# à distance, remoteSecure {#remote-remotesecure} - -Vous permet d'accéder à des serveurs distants sans `Distributed` table. - -Signature: - -``` sql -remote('addresses_expr', db, table[, 'user'[, 'password']]) -remote('addresses_expr', db.table[, 'user'[, 'password']]) -remoteSecure('addresses_expr', db, table[, 'user'[, 'password']]) -remoteSecure('addresses_expr', db.table[, 'user'[, 'password']]) -``` - -`addresses_expr` – An expression that generates addresses of remote servers. This may be just one server address. The server address is `host:port` ou juste `host`. L'hôte peut être spécifié comme nom de serveur ou l'adresse IPv4 ou IPv6. Une adresse IPv6 est indiquée entre crochets. Le port est le port TCP sur le serveur distant. Si le port est omis, il utilise `tcp_port` à partir du fichier de configuration du serveur (par défaut, 9000). - -!!! important "Important" - Le port est requis pour une adresse IPv6. - -Exemple: - -``` text -example01-01-1 -example01-01-1:9000 -localhost -127.0.0.1 -[::]:9000 -[2a02:6b8:0:1111::11]:9000 -``` - -Plusieurs adresses séparées par des virgules. Dans ce cas, ClickHouse utilisera le traitement distribué, donc il enverra la requête à toutes les adresses spécifiées (comme les fragments avec des données différentes). - -Exemple: - -``` text -example01-01-1,example01-02-1 -``` - -Une partie de l'expression peut être spécifiée entre crochets. L'exemple précédent peut être écrite comme suit: - -``` text -example01-0{1,2}-1 -``` - -Les accolades peuvent contenir une plage de Nombres séparés par deux points (entiers non négatifs). Dans ce cas, la gamme est étendue à un ensemble de valeurs qui génèrent fragment d'adresses. Si le premier nombre commence par zéro, les valeurs sont formées avec le même alignement zéro. 
L'exemple précédent peut être écrite comme suit: - -``` text -example01-{01..02}-1 -``` - -Si vous avez plusieurs paires d'accolades, il génère le produit direct des ensembles correspondants. - -Les adresses et les parties d'adresses entre crochets peuvent être séparées par le symbole de tuyau (\|). Dans ce cas, les ensembles correspondants de adresses sont interprétés comme des répliques, et la requête sera envoyée à la première sain réplique. Cependant, les répliques sont itérées dans l'ordre actuellement défini dans [équilibrage](../../operations/settings/settings.md) paramètre. - -Exemple: - -``` text -example01-{01..02}-{1|2} -``` - -Cet exemple spécifie deux fragments qui ont chacun deux répliques. - -Le nombre d'adresses générées est limitée par une constante. En ce moment, c'est 1000 adresses. - -À l'aide de la `remote` la fonction de table est moins optimale que la création d'un `Distributed` table, car dans ce cas, la connexion au serveur est rétablie pour chaque requête. En outre, si des noms d'hôte, les noms sont résolus, et les erreurs ne sont pas comptés lors de travail avec diverses répliques. Lors du traitement d'un grand nombre de requêtes, créez toujours `Distributed` table à l'avance, et ne pas utiliser la `remote` table de fonction. - -Le `remote` table de fonction peut être utile dans les cas suivants: - -- Accès à un serveur spécifique pour la comparaison de données, le débogage et les tests. -- Requêtes entre différents clusters ClickHouse à des fins de recherche. -- Demandes distribuées peu fréquentes qui sont faites manuellement. -- Distribué demandes où l'ensemble des serveurs est redéfinie à chaque fois. - -Si l'utilisateur n'est pas spécifié, `default` est utilisée. -Si le mot de passe n'est spécifié, un mot de passe vide est utilisé. - -`remoteSecure` - la même chose que `remote` but with secured connection. Default port — [tcp_port_secure](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-tcp_port_secure) de config ou 9440. - -[Article Original](https://clickhouse.tech/docs/en/query_language/table_functions/remote/) diff --git a/docs/fr/sql-reference/table-functions/url.md b/docs/fr/sql-reference/table-functions/url.md deleted file mode 100644 index 1df5cf55526..00000000000 --- a/docs/fr/sql-reference/table-functions/url.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 41 -toc_title: URL ---- - -# URL {#url} - -`url(URL, format, structure)` - retourne une table créée à partir du `URL` avec le -`format` et `structure`. - -URL-adresse du serveur HTTP ou HTTPS, qui peut accepter `GET` et/ou `POST` demande. - -format - [format](../../interfaces/formats.md#formats) des données. - -structure - structure de table dans `'UserID UInt64, Name String'` format. Détermine les noms et les types de colonnes. - -**Exemple** - -``` sql --- getting the first 3 lines of a table that contains columns of String and UInt32 type from HTTP-server which answers in CSV format. 
-SELECT * FROM url('http://127.0.0.1:12345/', CSV, 'column1 String, column2 UInt32') LIMIT 3 -``` - -[Article Original](https://clickhouse.tech/docs/en/query_language/table_functions/url/) diff --git a/docs/fr/whats-new/changelog/2017.md b/docs/fr/whats-new/changelog/2017.md deleted file mode 120000 index d581cbbb422..00000000000 --- a/docs/fr/whats-new/changelog/2017.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/whats-new/changelog/2017.md \ No newline at end of file diff --git a/docs/fr/whats-new/changelog/2018.md b/docs/fr/whats-new/changelog/2018.md deleted file mode 120000 index 22874fcae85..00000000000 --- a/docs/fr/whats-new/changelog/2018.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/whats-new/changelog/2018.md \ No newline at end of file diff --git a/docs/fr/whats-new/changelog/2019.md b/docs/fr/whats-new/changelog/2019.md deleted file mode 120000 index 0f3f095f8a1..00000000000 --- a/docs/fr/whats-new/changelog/2019.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/whats-new/changelog/2019.md \ No newline at end of file diff --git a/docs/fr/whats-new/changelog/index.md b/docs/fr/whats-new/changelog/index.md deleted file mode 120000 index 5461b93ec8c..00000000000 --- a/docs/fr/whats-new/changelog/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/whats-new/changelog/index.md \ No newline at end of file diff --git a/docs/fr/whats-new/index.md b/docs/fr/whats-new/index.md deleted file mode 100644 index 51a77da8ef4..00000000000 --- a/docs/fr/whats-new/index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_folder_title: Ce qui est Nouveau -toc_priority: 72 ---- - - diff --git a/docs/fr/whats-new/roadmap.md b/docs/fr/whats-new/roadmap.md deleted file mode 100644 index 87d64208f67..00000000000 --- a/docs/fr/whats-new/roadmap.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 74 -toc_title: Feuille de route ---- - -# Feuille de route {#roadmap} - -## Q1 2020 {#q1-2020} - -- Contrôle d'accès par rôle - -## Q2 2020 {#q2-2020} - -- Intégration avec les services d'authentification externes -- Pools de ressources pour une répartition plus précise de la capacité du cluster entre les utilisateurs - -{## [Article Original](https://clickhouse.tech/docs/en/roadmap/) ##} diff --git a/docs/fr/whats-new/security-changelog.md b/docs/fr/whats-new/security-changelog.md deleted file mode 100644 index 6046ef96bb2..00000000000 --- a/docs/fr/whats-new/security-changelog.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_priority: 76 -toc_title: "S\xE9curit\xE9 Changelog" ---- - -## Correction dans la version 19.14.3.3 de ClickHouse, 2019-09-10 {#fixed-in-clickhouse-release-19-14-3-3-2019-09-10} - -### CVE-2019-15024 {#cve-2019-15024} - -Аn attacker that has write access to ZooKeeper and who ican run a custom server available from the network where ClickHouse runs, can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica will fetch data part from the malicious replica, it can force clickhouse-server to write to arbitrary path on filesystem. - -Crédits: Eldar Zaitov de L'équipe de sécurité de L'Information Yandex - -### CVE-2019-16535 {#cve-2019-16535} - -Аn OOB read, OOB write and integer underflow in decompression algorithms can be used to achieve RCE or DoS via native protocol. 
- -Crédits: Eldar Zaitov de L'équipe de sécurité de L'Information Yandex - -### CVE-2019-16536 {#cve-2019-16536} - -Le débordement de pile menant à DoS peut être déclenché par un client authentifié malveillant. - -Crédits: Eldar Zaitov de L'équipe de sécurité de L'Information Yandex - -## Correction de la version 19.13.6.1 de ClickHouse, 2019-09-20 {#fixed-in-clickhouse-release-19-13-6-1-2019-09-20} - -### CVE-2019-18657 {#cve-2019-18657} - -Fonction de Table `url` la vulnérabilité avait-elle permis à l'attaquant d'injecter des en-têtes HTTP arbitraires dans la requête. - -Crédit: [Nikita Tikhomirov](https://github.com/NSTikhomirov) - -## Correction dans la version ClickHouse 18.12.13, 2018-09-10 {#fixed-in-clickhouse-release-18-12-13-2018-09-10} - -### CVE-2018-14672 {#cve-2018-14672} - -Les fonctions de chargement des modèles CatBoost permettaient de parcourir les chemins et de lire des fichiers arbitraires via des messages d'erreur. - -Crédits: Andrey Krasichkov de L'équipe de sécurité de L'Information Yandex - -## Correction dans la version 18.10.3 de ClickHouse, 2018-08-13 {#fixed-in-clickhouse-release-18-10-3-2018-08-13} - -### CVE-2018-14671 {#cve-2018-14671} - -unixODBC a permis de charger des objets partagés arbitraires à partir du système de fichiers, ce qui a conduit à une vulnérabilité D'exécution de Code À Distance. - -Crédits: Andrey Krasichkov et Evgeny Sidorov de Yandex Information Security Team - -## Correction dans la version 1.1.54388 de ClickHouse, 2018-06-28 {#fixed-in-clickhouse-release-1-1-54388-2018-06-28} - -### CVE-2018-14668 {#cve-2018-14668} - -“remote” la fonction de table a permis des symboles arbitraires dans “user”, “password” et “default_database” champs qui ont conduit à des attaques de falsification de requêtes inter-protocoles. - -Crédits: Andrey Krasichkov de L'équipe de sécurité de L'Information Yandex - -## Correction dans la version 1.1.54390 de ClickHouse, 2018-07-06 {#fixed-in-clickhouse-release-1-1-54390-2018-07-06} - -### CVE-2018-14669 {#cve-2018-14669} - -Clickhouse client MySQL avait “LOAD DATA LOCAL INFILE” fonctionnalité activée permettant à une base de données MySQL malveillante de lire des fichiers arbitraires à partir du serveur clickhouse connecté. - -Crédits: Andrey Krasichkov et Evgeny Sidorov de Yandex Information Security Team - -## Correction dans la version 1.1.54131 de ClickHouse, 2017-01-10 {#fixed-in-clickhouse-release-1-1-54131-2017-01-10} - -### CVE-2018-14670 {#cve-2018-14670} - -Configuration incorrecte dans le paquet deb pourrait conduire à l'utilisation non autorisée de la base de données. 
-
-Crédits: National Cyber Security Centre (NCSC)
-
-{## [Article Original](https://clickhouse.tech/docs/en/security_changelog/) ##}
diff --git a/docs/ja/commercial/cloud.md b/docs/ja/commercial/cloud.md
index 84f58e46cdb..62fc75ecbda 100644
--- a/docs/ja/commercial/cloud.md
+++ b/docs/ja/commercial/cloud.md
@@ -22,7 +22,7 @@ toc_title: "\u30AF\u30E9\u30A6\u30C9"
 
 ## Alibaba Cloud {#alibaba-cloud}
 
-ClickHouseのためのAlibaba Cloudの管理サービス [中国サイト](https://www.aliyun.com/product/clickhouse) (2021年5月に国際サイトで利用可能になります) 次の主な機能を提供します:
+[ClickHouseのためのAlibaba Cloudの管理サービス](https://www.alibabacloud.com/product/clickhouse) 次の主な機能を提供します:
 
 - Alibaba Cloud Apsara分散システムをベースにした信頼性の高いクラウドディスクストレージエンジン
 - 手動でのデータ移行を必要とせずに、オン・デマンドで容量を拡張
diff --git a/docs/ja/development/build.md b/docs/ja/development/build.md
index e44ba45485e..191fa665ccd 100644
--- a/docs/ja/development/build.md
+++ b/docs/ja/development/build.md
@@ -19,28 +19,17 @@ $ sudo apt-get install git cmake python ninja-build
 
 古いシステムではcmakeの代わりにcmake3。
 
-## GCC9のインストール {#install-gcc-10}
+## Clang 11 のインストール
 
-これを行うにはいくつかの方法があります。
+On Ubuntu/Debian you can use the automatic installation script (check the [official webpage](https://apt.llvm.org/)).
 
-### PPAパッケージからインストール {#install-from-a-ppa-package}
-
-``` bash
-$ sudo apt-get install software-properties-common
-$ sudo apt-add-repository ppa:ubuntu-toolchain-r/test
-$ sudo apt-get update
-$ sudo apt-get install gcc-10 g++-10
+```bash
+sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
 ```
 
-### ソースからインスト {#install-from-sources}
-
-見て [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh)
-
-## ビルドにGCC9を使用する {#use-gcc-10-for-builds}
-
 ``` bash
-$ export CC=gcc-10
-$ export CXX=g++-10
+$ export CC=clang
+$ export CXX=clang++
 ```
 
 ## ClickHouseソースのチェックアウト {#checkout-clickhouse-sources}
@@ -76,7 +65,7 @@ $ cd ..
 - Git(ソースをチェックアウトするためにのみ使用され、ビルドには必要ありません)
 - CMake3.10以降
 - 忍者(推奨)または作る
-- C++コンパイラ:gcc9またはclang8以降
+- C++コンパイラ:clang11以降
 - リンカ:lldまたはgold(古典的なGNU ldは動作しません)
 - Python(LLVMビルド内でのみ使用され、オプションです)
diff --git a/docs/ja/development/developer-instruction.md b/docs/ja/development/developer-instruction.md
index ccc3a177d1f..d7e5217b3b6 100644
--- a/docs/ja/development/developer-instruction.md
+++ b/docs/ja/development/developer-instruction.md
@@ -133,19 +133,19 @@ ArchまたはGentooを使用する場合は、おそらくCMakeのインスト
 
 ClickHouseはビルドに複数の外部ライブラリを使用します。 それらのすべては、サブモジュールにあるソースからClickHouseと一緒に構築されているので、別々にインストールする必要はありません。 リストは次の場所で確認できます `contrib`.
 
-# C++コンパイラ {#c-compiler}
+## C++ Compiler {#c-compiler}
 
-ClickHouseのビルドには、バージョン9以降のGCCとClangバージョン8以降のコンパイラがサポートされます。
+Clang compilers starting from version 11 are supported for building ClickHouse.
 
-公式のYandexビルドは、わずかに優れたパフォーマンスのマシンコードを生成するため、GCCを使用しています(私たちのベンチマークに応じて最大数パーセントの そしてClangは開発のために通常より便利です。 が、当社の継続的インテグレーション(CI)プラットフォームを運チェックのための十数の組み合わせとなります。
+Clang should be used instead of gcc. Still, our continuous integration (CI) platform runs checks for about a dozen build combinations.
 
-UBUNTUにGCCをインストールするには: `sudo apt install gcc g++`
+On Ubuntu/Debian you can use the automatic installation script (check the [official webpage](https://apt.llvm.org/)).
 
-Gccのバージョンを確認する: `gcc --version`. の場合は下記9その指示に従う。https://clickhouse.tech/docs/ja/development/build/#install-gcc-10.
+```bash
+sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
+```
 
-Mac OS XのビルドはClangでのみサポートされています。 ちょうど実行 `brew install llvm`
-
-Clangを使用する場合は、次のものもインストールできます `libc++` と `lld` あなたがそれが何であるか知っていれば。 を使用して `ccache` また、推奨されます。
+Mac OS X build is also supported. 
Just run `brew install llvm` # 建築プロセス {#the-building-process} @@ -158,13 +158,6 @@ ClickHouseを構築する準備ができたので、別のディレクトリを 中の間 `build` cmakeを実行してビルドを構成します。 最初の実行の前に、コンパイラ(この例ではバージョン9gccコンパイラ)を指定する環境変数を定義する必要があります。 -Linux: - - export CC=gcc-10 CXX=g++-10 - cmake .. - -Mac OS X: - export CC=clang CXX=clang++ cmake .. diff --git a/docs/ja/getting-started/example-datasets/ontime.md b/docs/ja/getting-started/example-datasets/ontime.md index bd049e8caad..d12d8a36069 100644 --- a/docs/ja/getting-started/example-datasets/ontime.md +++ b/docs/ja/getting-started/example-datasets/ontime.md @@ -29,126 +29,127 @@ done テーブルの作成: ``` sql -CREATE TABLE `ontime` ( - `Year` UInt16, - `Quarter` UInt8, - `Month` UInt8, - `DayofMonth` UInt8, - `DayOfWeek` UInt8, - `FlightDate` Date, - `UniqueCarrier` FixedString(7), - `AirlineID` Int32, - `Carrier` FixedString(2), - `TailNum` String, - `FlightNum` String, - `OriginAirportID` Int32, - `OriginAirportSeqID` Int32, - `OriginCityMarketID` Int32, - `Origin` FixedString(5), - `OriginCityName` String, - `OriginState` FixedString(2), - `OriginStateFips` String, - `OriginStateName` String, - `OriginWac` Int32, - `DestAirportID` Int32, - `DestAirportSeqID` Int32, - `DestCityMarketID` Int32, - `Dest` FixedString(5), - `DestCityName` String, - `DestState` FixedString(2), - `DestStateFips` String, - `DestStateName` String, - `DestWac` Int32, - `CRSDepTime` Int32, - `DepTime` Int32, - `DepDelay` Int32, - `DepDelayMinutes` Int32, - `DepDel15` Int32, - `DepartureDelayGroups` String, - `DepTimeBlk` String, - `TaxiOut` Int32, - `WheelsOff` Int32, - `WheelsOn` Int32, - `TaxiIn` Int32, - `CRSArrTime` Int32, - `ArrTime` Int32, - `ArrDelay` Int32, - `ArrDelayMinutes` Int32, - `ArrDel15` Int32, - `ArrivalDelayGroups` Int32, - `ArrTimeBlk` String, - `Cancelled` UInt8, - `CancellationCode` FixedString(1), - `Diverted` UInt8, - `CRSElapsedTime` Int32, - `ActualElapsedTime` Int32, - `AirTime` Int32, - `Flights` Int32, - `Distance` Int32, - `DistanceGroup` UInt8, - `CarrierDelay` Int32, - `WeatherDelay` Int32, - `NASDelay` Int32, - `SecurityDelay` Int32, - `LateAircraftDelay` Int32, - `FirstDepTime` String, - `TotalAddGTime` String, - `LongestAddGTime` String, - `DivAirportLandings` String, - `DivReachedDest` String, - `DivActualElapsedTime` String, - `DivArrDelay` String, - `DivDistance` String, - `Div1Airport` String, - `Div1AirportID` Int32, - `Div1AirportSeqID` Int32, - `Div1WheelsOn` String, - `Div1TotalGTime` String, - `Div1LongestGTime` String, - `Div1WheelsOff` String, - `Div1TailNum` String, - `Div2Airport` String, - `Div2AirportID` Int32, - `Div2AirportSeqID` Int32, - `Div2WheelsOn` String, - `Div2TotalGTime` String, - `Div2LongestGTime` String, - `Div2WheelsOff` String, - `Div2TailNum` String, - `Div3Airport` String, - `Div3AirportID` Int32, - `Div3AirportSeqID` Int32, - `Div3WheelsOn` String, - `Div3TotalGTime` String, - `Div3LongestGTime` String, - `Div3WheelsOff` String, - `Div3TailNum` String, - `Div4Airport` String, - `Div4AirportID` Int32, - `Div4AirportSeqID` Int32, - `Div4WheelsOn` String, - `Div4TotalGTime` String, - `Div4LongestGTime` String, - `Div4WheelsOff` String, - `Div4TailNum` String, - `Div5Airport` String, - `Div5AirportID` Int32, - `Div5AirportSeqID` Int32, - `Div5WheelsOn` String, - `Div5TotalGTime` String, - `Div5LongestGTime` String, - `Div5WheelsOff` String, - `Div5TailNum` String +CREATE TABLE `ontime` +( + `Year` UInt16, + `Quarter` UInt8, + `Month` UInt8, + `DayofMonth` UInt8, + `DayOfWeek` UInt8, + `FlightDate` Date, + `Reporting_Airline` String, + 
`DOT_ID_Reporting_Airline` Int32, + `IATA_CODE_Reporting_Airline` String, + `Tail_Number` Int32, + `Flight_Number_Reporting_Airline` String, + `OriginAirportID` Int32, + `OriginAirportSeqID` Int32, + `OriginCityMarketID` Int32, + `Origin` FixedString(5), + `OriginCityName` String, + `OriginState` FixedString(2), + `OriginStateFips` String, + `OriginStateName` String, + `OriginWac` Int32, + `DestAirportID` Int32, + `DestAirportSeqID` Int32, + `DestCityMarketID` Int32, + `Dest` FixedString(5), + `DestCityName` String, + `DestState` FixedString(2), + `DestStateFips` String, + `DestStateName` String, + `DestWac` Int32, + `CRSDepTime` Int32, + `DepTime` Int32, + `DepDelay` Int32, + `DepDelayMinutes` Int32, + `DepDel15` Int32, + `DepartureDelayGroups` String, + `DepTimeBlk` String, + `TaxiOut` Int32, + `WheelsOff` Int32, + `WheelsOn` Int32, + `TaxiIn` Int32, + `CRSArrTime` Int32, + `ArrTime` Int32, + `ArrDelay` Int32, + `ArrDelayMinutes` Int32, + `ArrDel15` Int32, + `ArrivalDelayGroups` Int32, + `ArrTimeBlk` String, + `Cancelled` UInt8, + `CancellationCode` FixedString(1), + `Diverted` UInt8, + `CRSElapsedTime` Int32, + `ActualElapsedTime` Int32, + `AirTime` Nullable(Int32), + `Flights` Int32, + `Distance` Int32, + `DistanceGroup` UInt8, + `CarrierDelay` Int32, + `WeatherDelay` Int32, + `NASDelay` Int32, + `SecurityDelay` Int32, + `LateAircraftDelay` Int32, + `FirstDepTime` String, + `TotalAddGTime` String, + `LongestAddGTime` String, + `DivAirportLandings` String, + `DivReachedDest` String, + `DivActualElapsedTime` String, + `DivArrDelay` String, + `DivDistance` String, + `Div1Airport` String, + `Div1AirportID` Int32, + `Div1AirportSeqID` Int32, + `Div1WheelsOn` String, + `Div1TotalGTime` String, + `Div1LongestGTime` String, + `Div1WheelsOff` String, + `Div1TailNum` String, + `Div2Airport` String, + `Div2AirportID` Int32, + `Div2AirportSeqID` Int32, + `Div2WheelsOn` String, + `Div2TotalGTime` String, + `Div2LongestGTime` String, + `Div2WheelsOff` String, + `Div2TailNum` String, + `Div3Airport` String, + `Div3AirportID` Int32, + `Div3AirportSeqID` Int32, + `Div3WheelsOn` String, + `Div3TotalGTime` String, + `Div3LongestGTime` String, + `Div3WheelsOff` String, + `Div3TailNum` String, + `Div4Airport` String, + `Div4AirportID` Int32, + `Div4AirportSeqID` Int32, + `Div4WheelsOn` String, + `Div4TotalGTime` String, + `Div4LongestGTime` String, + `Div4WheelsOff` String, + `Div4TailNum` String, + `Div5Airport` String, + `Div5AirportID` Int32, + `Div5AirportSeqID` Int32, + `Div5WheelsOn` String, + `Div5TotalGTime` String, + `Div5LongestGTime` String, + `Div5WheelsOff` String, + `Div5TailNum` String ) ENGINE = MergeTree -PARTITION BY Year -ORDER BY (Carrier, FlightDate) -SETTINGS index_granularity = 8192; + PARTITION BY Year + ORDER BY (IATA_CODE_Reporting_Airline, FlightDate) + SETTINGS index_granularity = 8192; ``` データのロード: ``` bash -$ for i in *.zip; do echo $i; unzip -cq $i '*.csv' | sed 's/\.00//g' | clickhouse-client --host=example-perftest01j --query="INSERT INTO ontime FORMAT CSVWithNames"; done +ls -1 *.zip | xargs -I{} -P $(nproc) bash -c "echo {}; unzip -cq {} '*.csv' | sed 's/\.00//g' | clickhouse-client --input_format_with_names_use_header=0 --query='INSERT INTO ontime FORMAT CSVWithNames'" ``` ## パーティション済みデータのダウンロード {#download-of-prepared-partitions} @@ -212,10 +213,10 @@ LIMIT 10; Q4. 
2007年のキャリア別の遅延の数 ``` sql -SELECT Carrier, count(*) +SELECT IATA_CODE_Reporting_Airline AS Carrier, count(*) FROM ontime WHERE DepDelay>10 AND Year=2007 -GROUP BY Carrier +GROUP BY IATA_CODE_Reporting_Airline ORDER BY count(*) DESC; ``` @@ -226,32 +227,32 @@ SELECT Carrier, c, c2, c*100/c2 as c3 FROM ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year=2007 GROUP BY Carrier -) +) q JOIN ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c2 FROM ontime WHERE Year=2007 GROUP BY Carrier -) USING Carrier +) qq USING Carrier ORDER BY c3 DESC; ``` 同じクエリのより良いバージョン: ``` sql -SELECT Carrier, avg(DepDelay>10)*100 AS c3 +SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3 FROM ontime WHERE Year=2007 -GROUP BY Carrier +GROUP BY IATA_CODE_Reporting_Airline ORDER BY c3 DESC ``` @@ -262,29 +263,29 @@ SELECT Carrier, c, c2, c*100/c2 as c3 FROM ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year>=2000 AND Year<=2008 GROUP BY Carrier -) +) q JOIN ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c2 FROM ontime WHERE Year>=2000 AND Year<=2008 GROUP BY Carrier -) USING Carrier +) qq USING Carrier ORDER BY c3 DESC; ``` 同じクエリのより良いバージョン: ``` sql -SELECT Carrier, avg(DepDelay>10)*100 AS c3 +SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3 FROM ontime WHERE Year>=2000 AND Year<=2008 GROUP BY Carrier @@ -303,7 +304,7 @@ FROM from ontime WHERE DepDelay>10 GROUP BY Year -) +) q JOIN ( select @@ -311,7 +312,7 @@ JOIN count(*) as c2 from ontime GROUP BY Year -) USING (Year) +) qq USING (Year) ORDER BY Year; ``` @@ -346,7 +347,7 @@ Q10. ``` sql SELECT - min(Year), max(Year), Carrier, count(*) AS cnt, + min(Year), max(Year), IATA_CODE_Reporting_Airline AS Carrier, count(*) AS cnt, sum(ArrDelayMinutes>30) AS flights_delayed, round(sum(ArrDelayMinutes>30)/count(*),2) AS rate FROM ontime diff --git a/docs/ja/sql-reference/aggregate-functions/reference.md b/docs/ja/sql-reference/aggregate-functions/reference.md index 465f36179da..c66e9b54746 100644 --- a/docs/ja/sql-reference/aggregate-functions/reference.md +++ b/docs/ja/sql-reference/aggregate-functions/reference.md @@ -624,7 +624,7 @@ uniqHLL12(x[, ...]) - HyperLogLogアルゴリズムを使用して、異なる引数値の数を近似します。 - 212 5-bit cells are used. The size of the state is slightly more than 2.5 KB. The result is not very accurate (up to ~10% error) for small data sets (<10K elements). However, the result is fairly accurate for high-cardinality data sets (10K-100M), with a maximum error of ~1.6%. Starting from 100M, the estimation error increases, and the function will return very inaccurate results for data sets with extremely high cardinality (1B+ elements). + 2^12 5-bit cells are used. The size of the state is slightly more than 2.5 KB. The result is not very accurate (up to ~10% error) for small data sets (<10K elements). However, the result is fairly accurate for high-cardinality data sets (10K-100M), with a maximum error of ~1.6%. Starting from 100M, the estimation error increases, and the function will return very inaccurate results for data sets with extremely high cardinality (1B+ elements). 
- 決定的な結果を提供します(クエリ処理順序に依存しません)。 diff --git a/docs/ja/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/ja/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index d0660444e15..f29a608b85e 100644 --- a/docs/ja/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/ja/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -55,7 +55,7 @@ SOURCE(SOURCE_TYPE(param1 val1 ... paramN valN)) -- Source configuration または ``` sql -SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated')) +SOURCE(FILE(path './user_files/os.tsv' format 'TabSeparated')) SETTINGS(format_csv_allow_single_quotes = 0) ``` @@ -87,7 +87,7 @@ SETTINGS(format_csv_allow_single_quotes = 0) または ``` sql -SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated')) +SOURCE(FILE(path './user_files/os.tsv' format 'TabSeparated')) ``` フィールドの設定: diff --git a/docs/ja/sql-reference/functions/bitmap-functions.md b/docs/ja/sql-reference/functions/bitmap-functions.md index cc57e762610..de3ce938444 100644 --- a/docs/ja/sql-reference/functions/bitmap-functions.md +++ b/docs/ja/sql-reference/functions/bitmap-functions.md @@ -35,7 +35,7 @@ SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res) ``` text ┌─res─┬─toTypeName(bitmapBuild([1, 2, 3, 4, 5]))─────┐ -│  │ AggregateFunction(groupBitmap, UInt8) │ +│ │ AggregateFunction(groupBitmap, UInt8) │ └─────┴──────────────────────────────────────────────┘ ``` diff --git a/docs/ja/sql-reference/functions/hash-functions.md b/docs/ja/sql-reference/functions/hash-functions.md index d48e6846bb4..a98ae60690d 100644 --- a/docs/ja/sql-reference/functions/hash-functions.md +++ b/docs/ja/sql-reference/functions/hash-functions.md @@ -434,13 +434,13 @@ A [FixedString(16)](../../sql-reference/data-types/fixedstring.md) データ型 **例** ``` sql -SELECT murmurHash3_128('example_string') AS MurmurHash3, toTypeName(MurmurHash3) AS type +SELECT hex(murmurHash3_128('example_string')) AS MurmurHash3, toTypeName(MurmurHash3) AS type; ``` ``` text -┌─MurmurHash3──────┬─type────────────┐ -│ 6�1�4"S5KT�~~q │ FixedString(16) │ -└──────────────────┴─────────────────┘ +┌─MurmurHash3──────────────────────┬─type───┐ +│ 368A1A311CB7342253354B548E7E7E71 │ String │ +└──────────────────────────────────┴────────┘ ``` ## xxHash32,xxHash64 {#hash-functions-xxhash32} diff --git a/docs/ja/sql-reference/statements/select/index.md b/docs/ja/sql-reference/statements/select/index.md deleted file mode 120000 index 9c649322c82..00000000000 --- a/docs/ja/sql-reference/statements/select/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql-reference/statements/select/index.md \ No newline at end of file diff --git a/docs/ja/sql-reference/statements/select/index.md b/docs/ja/sql-reference/statements/select/index.md new file mode 100644 index 00000000000..b1a97ba1b28 --- /dev/null +++ b/docs/ja/sql-reference/statements/select/index.md @@ -0,0 +1,283 @@ +--- +title: SELECT Query +toc_folder_title: SELECT +toc_priority: 32 +toc_title: Overview +--- + +# SELECT Query {#select-queries-syntax} + +`SELECT` queries perform data retrieval. By default, the requested data is returned to the client, while in conjunction with [INSERT INTO](../../../sql-reference/statements/insert-into.md) it can be forwarded to a different table. 
+
+## Syntax {#syntax}
+
+``` sql
+[WITH expr_list|(subquery)]
+SELECT [DISTINCT] expr_list
+[FROM [db.]table | (subquery) | table_function] [FINAL]
+[SAMPLE sample_coeff]
+[ARRAY JOIN ...]
+[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN (subquery)|table (ON <expr_list>)|(USING <column_list>)
+[PREWHERE expr]
+[WHERE expr]
+[GROUP BY expr_list] [WITH ROLLUP|WITH CUBE] [WITH TOTALS]
+[HAVING expr]
+[ORDER BY expr_list] [WITH FILL] [FROM expr] [TO expr] [STEP expr]
+[LIMIT [offset_value, ]n BY columns]
+[LIMIT [n, ]m] [WITH TIES]
+[SETTINGS ...]
+[UNION  ...]
+[INTO OUTFILE filename]
+[FORMAT format]
+```
+
+All clauses are optional, except for the required list of expressions immediately after `SELECT`, which is covered in more detail [below](#select-clause).
+
+Specifics of each optional clause are covered in separate sections, which are listed in the same order as they are executed:
+
+- [WITH clause](../../../sql-reference/statements/select/with.md)
+- [FROM clause](../../../sql-reference/statements/select/from.md)
+- [SAMPLE clause](../../../sql-reference/statements/select/sample.md)
+- [JOIN clause](../../../sql-reference/statements/select/join.md)
+- [PREWHERE clause](../../../sql-reference/statements/select/prewhere.md)
+- [WHERE clause](../../../sql-reference/statements/select/where.md)
+- [GROUP BY clause](../../../sql-reference/statements/select/group-by.md)
+- [LIMIT BY clause](../../../sql-reference/statements/select/limit-by.md)
+- [HAVING clause](../../../sql-reference/statements/select/having.md)
+- [SELECT clause](#select-clause)
+- [DISTINCT clause](../../../sql-reference/statements/select/distinct.md)
+- [LIMIT clause](../../../sql-reference/statements/select/limit.md)
+- [OFFSET clause](../../../sql-reference/statements/select/offset.md)
+- [UNION clause](../../../sql-reference/statements/select/union.md)
+- [INTO OUTFILE clause](../../../sql-reference/statements/select/into-outfile.md)
+- [FORMAT clause](../../../sql-reference/statements/select/format.md)
+
+## SELECT Clause {#select-clause}
+
+[Expressions](../../../sql-reference/syntax.md#syntax-expressions) specified in the `SELECT` clause are calculated after all the operations in the clauses described above are finished. These expressions work as if they apply to separate rows in the result. If expressions in the `SELECT` clause contain aggregate functions, then ClickHouse processes aggregate functions and expressions used as their arguments during the [GROUP BY](../../../sql-reference/statements/select/group-by.md) aggregation.
+
+If you want to include all columns in the result, use the asterisk (`*`) symbol. For example, `SELECT * FROM ...`.
+
+### COLUMNS expression {#columns-expression}
+
+To match some columns in the result with a [re2](https://en.wikipedia.org/wiki/RE2_(software)) regular expression, you can use the `COLUMNS` expression.
+
+``` sql
+COLUMNS('regexp')
+```
+
+For example, consider the table:
+
+``` sql
+CREATE TABLE default.col_names (aa Int8, ab Int8, bc Int8) ENGINE = TinyLog
+```
+
+The following query selects data from all the columns containing the `a` symbol in their name.
+
+``` sql
+SELECT COLUMNS('a') FROM col_names
+```
+
+``` text
+┌─aa─┬─ab─┐
+│  1 │  1 │
+└────┴────┘
+```
+
+The selected columns are not returned in alphabetical order.
+
+You can use multiple `COLUMNS` expressions in a query and apply functions to them. 
+ +For example: + +``` sql +SELECT COLUMNS('a'), COLUMNS('c'), toTypeName(COLUMNS('c')) FROM col_names +``` + +``` text +┌─aa─┬─ab─┬─bc─┬─toTypeName(bc)─┐ +│ 1 │ 1 │ 1 │ Int8 │ +└────┴────┴────┴────────────────┘ +``` + +Each column returned by the `COLUMNS` expression is passed to the function as a separate argument. Also you can pass other arguments to the function if it supports them. Be careful when using functions. If a function doesn’t support the number of arguments you have passed to it, ClickHouse throws an exception. + +For example: + +``` sql +SELECT COLUMNS('a') + COLUMNS('c') FROM col_names +``` + +``` text +Received exception from server (version 19.14.1): +Code: 42. DB::Exception: Received from localhost:9000. DB::Exception: Number of arguments for function plus doesn't match: passed 3, should be 2. +``` + +In this example, `COLUMNS('a')` returns two columns: `aa` and `ab`. `COLUMNS('c')` returns the `bc` column. The `+` operator can’t apply to 3 arguments, so ClickHouse throws an exception with the relevant message. + +Columns that matched the `COLUMNS` expression can have different data types. If `COLUMNS` doesn’t match any columns and is the only expression in `SELECT`, ClickHouse throws an exception. + +### Asterisk {#asterisk} + +You can put an asterisk in any part of a query instead of an expression. When the query is analyzed, the asterisk is expanded to a list of all table columns (excluding the `MATERIALIZED` and `ALIAS` columns). There are only a few cases when using an asterisk is justified: + +- When creating a table dump. +- For tables containing just a few columns, such as system tables. +- For getting information about what columns are in a table. In this case, set `LIMIT 1`. But it is better to use the `DESC TABLE` query. +- When there is strong filtration on a small number of columns using `PREWHERE`. +- In subqueries (since columns that aren’t needed for the external query are excluded from subqueries). + +In all other cases, we don’t recommend using the asterisk, since it only gives you the drawbacks of a columnar DBMS instead of the advantages. In other words using the asterisk is not recommended. + +### Extreme Values {#extreme-values} + +In addition to results, you can also get minimum and maximum values for the results columns. To do this, set the **extremes** setting to 1. Minimums and maximums are calculated for numeric types, dates, and dates with times. For other columns, the default values are output. + +An extra two rows are calculated – the minimums and maximums, respectively. These extra two rows are output in `JSON*`, `TabSeparated*`, and `Pretty*` [formats](../../../interfaces/formats.md), separate from the other rows. They are not output for other formats. + +In `JSON*` formats, the extreme values are output in a separate ‘extremes’ field. In `TabSeparated*` formats, the row comes after the main result, and after ‘totals’ if present. It is preceded by an empty row (after the other data). In `Pretty*` formats, the row is output as a separate table after the main result, and after `totals` if present. + +Extreme values are calculated for rows before `LIMIT`, but after `LIMIT BY`. However, when using `LIMIT offset, size`, the rows before `offset` are included in `extremes`. In stream requests, the result may also include a small number of rows that passed through `LIMIT`. + +### Notes {#notes} + +You can use synonyms (`AS` aliases) in any part of a query. + +The `GROUP BY` and `ORDER BY` clauses do not support positional arguments. 
+
+### Notes {#notes}
+
+You can use synonyms (`AS` aliases) in any part of a query.
+
+The `GROUP BY` and `ORDER BY` clauses do not support positional arguments. This contradicts MySQL, but conforms to standard SQL. For example, `GROUP BY 1, 2` will be interpreted as grouping by constants (i.e. aggregation of all rows into one).
+
+## Implementation Details {#implementation-details}
+
+If the query omits the `DISTINCT`, `GROUP BY` and `ORDER BY` clauses and the `IN` and `JOIN` subqueries, the query will be completely stream processed, using O(1) amount of RAM. Otherwise, the query might consume a lot of RAM if the appropriate restrictions are not specified:
+
+- `max_memory_usage`
+- `max_rows_to_group_by`
+- `max_rows_to_sort`
+- `max_rows_in_distinct`
+- `max_bytes_in_distinct`
+- `max_rows_in_set`
+- `max_bytes_in_set`
+- `max_rows_in_join`
+- `max_bytes_in_join`
+- `max_bytes_before_external_sort`
+- `max_bytes_before_external_group_by`
+
+For more information, see the section “Settings”. It is possible to use external sorting (saving temporary tables to a disk) and external aggregation.
+
+## SELECT modifiers {#select-modifiers}
+
+You can use the following modifiers in `SELECT` queries.
+
+### APPLY {#apply-modifier}
+
+Allows you to invoke some function for each row returned by an outer table expression of a query.
+
+**Syntax:**
+
+``` sql
+SELECT <expr> APPLY( <func> ) FROM [db.]table_name
+```
+
+**Example:**
+
+``` sql
+CREATE TABLE columns_transformers (i Int64, j Int16, k Int64) ENGINE = MergeTree ORDER BY (i);
+INSERT INTO columns_transformers VALUES (100, 10, 324), (120, 8, 23);
+SELECT * APPLY(sum) FROM columns_transformers;
+```
+
+``` text
+┌─sum(i)─┬─sum(j)─┬─sum(k)─┐
+│    220 │     18 │    347 │
+└────────┴────────┴────────┘
+```
+
+### EXCEPT {#except-modifier}
+
+Specifies the names of one or more columns to exclude from the result. All matching column names are omitted from the output.
+
+**Syntax:**
+
+``` sql
+SELECT <expr> EXCEPT ( col_name1 [, col_name2, col_name3, ...] ) FROM [db.]table_name
+```
+
+**Example:**
+
+``` sql
+SELECT * EXCEPT (i) FROM columns_transformers;
+```
+
+``` text
+┌──j─┬───k─┐
+│ 10 │ 324 │
+│  8 │  23 │
+└────┴─────┘
+```
+
+### REPLACE {#replace-modifier}
+
+Specifies one or more [expression aliases](../../../sql-reference/syntax.md#syntax-expression_aliases). Each alias must match a column name from the `SELECT *` statement. In the output column list, the column that matches the alias is replaced by the expression in that `REPLACE`.
+
+This modifier does not change the names or order of columns. However, it can change the value and the value type.
+
+**Syntax:**
+
+``` sql
+SELECT <expr> REPLACE( <expr> AS col_name) FROM [db.]table_name
+```
+
+**Example:**
+
+``` sql
+SELECT * REPLACE(i + 1 AS i) FROM columns_transformers;
+```
+
+``` text
+┌───i─┬──j─┬───k─┐
+│ 101 │ 10 │ 324 │
+│ 121 │  8 │  23 │
+└─────┴────┴─────┘
+```
+
+### Modifier Combinations {#modifier-combinations}
+
+You can use each modifier separately or combine them.
+
+**Examples:**
+
+Using the same modifier multiple times.
+
+``` sql
+SELECT COLUMNS('[jk]') APPLY(toString) APPLY(length) APPLY(max) FROM columns_transformers;
+```
+
+``` text
+┌─max(length(toString(j)))─┬─max(length(toString(k)))─┐
+│                        2 │                        3 │
+└──────────────────────────┴──────────────────────────┘
+```
+
+Using multiple modifiers in a single query.
+
+``` sql
+SELECT * REPLACE(i + 1 AS i) EXCEPT (j) APPLY(sum) FROM columns_transformers;
+```
+
+``` text
+┌─sum(plus(i, 1))─┬─sum(k)─┐
+│             222 │    347 │
+└─────────────────┴────────┘
+```
+
+## SETTINGS in SELECT Query {#settings-in-select}
+
+You can specify the necessary settings right in the `SELECT` query. The setting value is applied only to this query and is reset to the default or previous value after the query is executed.
+
+For other ways to set settings, see [here](../../../operations/settings/index.md).
+
+**Example**
+
+``` sql
+SELECT * FROM some_table SETTINGS optimize_read_in_order=1, cast_keep_nullable=1;
+```
diff --git a/docs/ja/sql-reference/statements/select/offset.md b/docs/ja/sql-reference/statements/select/offset.md
new file mode 100644
index 00000000000..3efd916bcb8
--- /dev/null
+++ b/docs/ja/sql-reference/statements/select/offset.md
@@ -0,0 +1,86 @@
+---
+toc_title: OFFSET
+---
+
+# OFFSET FETCH Clause {#offset-fetch}
+
+`OFFSET` and `FETCH` allow you to retrieve data in portions. They specify a row block which you want to get by a single query.
+
+``` sql
+OFFSET offset_row_count {ROW | ROWS} [FETCH {FIRST | NEXT} fetch_row_count {ROW | ROWS} {ONLY | WITH TIES}]
+```
+
+The `offset_row_count` or `fetch_row_count` value can be a number or a literal constant. You can omit `fetch_row_count`; by default, it equals 1.
+
+`OFFSET` specifies the number of rows to skip before starting to return rows from the query result set.
+
+`FETCH` specifies the maximum number of rows that can be in the result of a query.
+
+The `ONLY` option is used to return rows that immediately follow the rows omitted by the `OFFSET`. In this case, `FETCH` is an alternative to the [LIMIT](../../../sql-reference/statements/select/limit.md) clause. For example, the following query
+
+``` sql
+SELECT * FROM test_fetch ORDER BY a OFFSET 1 ROW FETCH FIRST 3 ROWS ONLY;
+```
+
+is identical to the query
+
+``` sql
+SELECT * FROM test_fetch ORDER BY a LIMIT 3 OFFSET 1;
+```
+
+The `WITH TIES` option is used to return any additional rows that tie for the last place in the result set according to the `ORDER BY` clause. For example, if `fetch_row_count` is set to 5 but two additional rows match the values of the `ORDER BY` columns in the fifth row, the result set will contain seven rows.
+
+!!! note "Note"
+    According to the standard, the `OFFSET` clause must come before the `FETCH` clause if both are present.
+
+!!! note "Note"
+    The real offset can also depend on the [offset](../../../operations/settings/settings.md#offset) setting.
+
+## Examples {#examples}
+
+Input table:
+
+``` text
+┌─a─┬─b─┐
+│ 1 │ 1 │
+│ 2 │ 1 │
+│ 3 │ 4 │
+│ 1 │ 3 │
+│ 5 │ 4 │
+│ 0 │ 6 │
+│ 5 │ 7 │
+└───┴───┘
+```
+
+Usage of the `ONLY` option:
+
+``` sql
+SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROW FETCH FIRST 3 ROWS ONLY;
+```
+
+Result:
+
+``` text
+┌─a─┬─b─┐
+│ 2 │ 1 │
+│ 3 │ 4 │
+│ 5 │ 4 │
+└───┴───┘
+```
+
+Usage of the `WITH TIES` option:
+
+``` sql
+SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROW FETCH FIRST 3 ROWS WITH TIES;
+```
+
+Result:
+
+``` text
+┌─a─┬─b─┐
+│ 2 │ 1 │
+│ 3 │ 4 │
+│ 5 │ 4 │
+│ 5 │ 7 │
+└───┴───┘
+```
diff --git a/docs/ru/commercial/cloud.md b/docs/ru/commercial/cloud.md
index 610f0f00a99..e00fc3be673 100644
--- a/docs/ru/commercial/cloud.md
+++ b/docs/ru/commercial/cloud.md
@@ -29,3 +29,30 @@ toc_title: "Поставщики облачных услуг ClickHouse"
- cross-az масштабирование для повышения производительности и обеспечения высокой доступности
- встроенный мониторинг и редактор SQL-запросов

+## Alibaba Cloud {#alibaba-cloud}
+
+Управляемый облачный сервис Alibaba для ClickHouse ([китайская площадка](https://www.aliyun.com/product/clickhouse)) будет доступен на международной площадке в мае 2021 года. Сервис предоставляет следующие возможности:
+
+- надежный сервер для облачного хранилища на основе распределенной системы [Alibaba Cloud Apsara](https://www.alibabacloud.com/product/apsara-stack);
+- расширяемая по запросу емкость без переноса данных вручную;
+- поддержка одноузловой и многоузловой архитектуры, архитектуры с одной или несколькими репликами, а также многоуровневого хранения cold и hot data;
+- поддержка прав доступа, one-key восстановления, многоуровневая защита сети, шифрование облачного диска;
+- полная интеграция с облачными системами логирования, базами данных и инструментами обработки данных;
+- встроенная платформа для мониторинга и управления базами данных;
+- техническая поддержка от экспертов по работе с базами данных.
+
+## SberCloud {#sbercloud}
+
+[Облачная платформа SberCloud.Advanced](https://sbercloud.ru/ru/advanced):
+
+- предоставляет более 50 высокотехнологичных сервисов;
+- позволяет быстро создавать ИТ-инфраструктуру, приложения и интернет-сервисы и эффективно управлять ими;
+- радикально минимизирует ресурсы, требуемые для работы корпоративных ИТ-систем;
+- в разы сокращает время вывода новых продуктов на рынок.
+
+SberCloud.Advanced предоставляет [MapReduce Service (MRS)](https://docs.sbercloud.ru/mrs/ug/topics/ug__clickhouse.html) — надежную, безопасную и простую в использовании платформу корпоративного уровня для хранения, обработки и анализа больших данных. MRS позволяет быстро создавать кластеры ClickHouse и управлять ими.
+
+- Инстанс ClickHouse состоит из трех узлов ZooKeeper и нескольких узлов ClickHouse. Выделенный режим реплики используется для обеспечения высокой надежности двойных копий данных.
+- MRS предлагает возможности гибкого масштабирования при быстром росте сервисов в сценариях, когда емкости кластерного хранилища или вычислительных ресурсов процессора недостаточно. MRS в один клик предоставляет инструмент для балансировки данных при расширении узлов ClickHouse в кластере. Вы можете определить режим и время балансировки данных на основе характеристик сервиса, чтобы обеспечить доступность сервиса.
+- MRS использует архитектуру развертывания высокой доступности на основе Elastic Load Balance (ELB) — сервиса для автоматического распределения трафика на несколько внутренних узлов. Благодаря ELB данные записываются в локальные таблицы и считываются из распределенных таблиц на разных узлах. Такая архитектура повышает отказоустойчивость кластера и гарантирует высокую доступность приложений.

diff --git a/docs/ru/development/architecture.md b/docs/ru/development/architecture.md
index 9f43fabba4f..d2cfc44b711 100644
--- a/docs/ru/development/architecture.md
+++ b/docs/ru/development/architecture.md
@@ -27,7 +27,7 @@ ClickHouse - полноценная колоночная СУБД. Данные

`IColumn` предоставляет методы для общих реляционных преобразований данных, но они не отвечают всем потребностям. Например, `ColumnUInt64` не имеет метода для вычисления суммы двух столбцов, а `ColumnString` не имеет метода для запуска поиска по подстроке. Эти бесчисленные процедуры реализованы вне `IColumn`.

-Различные функции на колонках могут быть реализованы обобщенным, неэффективным путем, используя `IColumn` методы для извлечения значений `Field`, или специальным путем, используя знания о внутреннем распределение данных в памяти в конкретной реализации `IColumn`. Для этого функции приводятся к конкретному типу `IColumn` и работают напрямую с его внутренним представлением.
Например, в `ColumnUInt64` есть метод getData, который возвращает ссылку на внутренний массив, чтение и заполнение которого, выполняется отдельной процедурой напрямую. Фактически, мы имеем "дырявую абстракции", обеспечивающие эффективные специализации различных процедур. +Различные функции на колонках могут быть реализованы обобщенным, неэффективным путем, используя `IColumn` методы для извлечения значений `Field`, или специальным путем, используя знания о внутреннем распределение данных в памяти в конкретной реализации `IColumn`. Для этого функции приводятся к конкретному типу `IColumn` и работают напрямую с его внутренним представлением. Например, в `ColumnUInt64` есть метод `getData`, который возвращает ссылку на внутренний массив, чтение и заполнение которого, выполняется отдельной процедурой напрямую. Фактически, мы имеем "дырявые абстракции", обеспечивающие эффективные специализации различных процедур. ## Типы данных (Data Types) {#data_types} @@ -42,7 +42,7 @@ ClickHouse - полноценная колоночная СУБД. Данные ## Блоки (Block) {#block} -`Block` это контейнер, который представляет фрагмент (chunk) таблицы в памяти. Это набор троек - `(IColumn, IDataType, имя колонки)`. В процессе выполнения запроса, данные обрабатываются `Block`ами. Если у нас есть `Block`, значит у нас есть данные (в объекте `IColumn`), информация о типе (в `IDataType`), которая говорит нам, как работать с колонкой, и имя колонки (оригинальное имя колонки таблицы или служебное имя, присвоенное для получения промежуточных результатов вычислений). +`Block` это контейнер, который представляет фрагмент (chunk) таблицы в памяти. Это набор троек - `(IColumn, IDataType, имя колонки)`. В процессе выполнения запроса, данные обрабатываются `Block`-ами. Если у нас есть `Block`, значит у нас есть данные (в объекте `IColumn`), информация о типе (в `IDataType`), которая говорит нам, как работать с колонкой, и имя колонки (оригинальное имя колонки таблицы или служебное имя, присвоенное для получения промежуточных результатов вычислений). При вычислении некоторой функции на колонках в блоке мы добавляем еще одну колонку с результатами в блок, не трогая колонки аргументов функции, потому что операции иммутабельные. Позже ненужные колонки могут быть удалены из блока, но не модифицированы. Это удобно для устранения общих подвыражений. @@ -58,7 +58,7 @@ ClickHouse - полноценная колоночная СУБД. Данные 2. Реализацию форматов данных. Например, при выводе данных в терминал в формате `Pretty`, вы создаете выходной поток блоков, который форматирует поступающие в него блоки. 3. Трансформацию данных. Допустим, у вас есть `IBlockInputStream` и вы хотите создать отфильтрованный поток. Вы создаете `FilterBlockInputStream` и инициализируете его вашим потоком. Затем вы тянете (pull) блоки из `FilterBlockInputStream`, а он тянет блоки исходного потока, фильтрует их и возвращает отфильтрованные блоки вам. Таким образом построены конвейеры выполнения запросов. -Имеются и более сложные трансформации. Например, когда вы тянете блоки из `AggregatingBlockInputStream`, он считывает все данные из своего источника, агрегирует их, и возвращает поток агрегированных данных вам. Другой пример: конструктор `UnionBlockInputStream` принимает множество источников входных данных и число потоков. Такой `Stream` работает в несколько потоков и читает данные источников параллельно. +Имеются и более сложные трансформации. 
Например, когда вы тянете блоки из `AggregatingBlockInputStream`, он считывает все данные из своего источника, агрегирует их, и возвращает поток агрегированных данных вам. Другой пример: конструктор `UnionBlockInputStream` принимает множество источников входных данных и число потоков. Такой `Stream` работает в несколько потоков и читает данные источников параллельно. > Потоки блоков используют «втягивающий» (pull) подход к управлению потоком выполнения: когда вы вытягиваете блок из первого потока, он, следовательно, вытягивает необходимые блоки из вложенных потоков, так и работает весь конвейер выполнения. Ни «pull» ни «push» не имеют явного преимущества, потому что поток управления неявный, и это ограничивает в реализации различных функций, таких как одновременное выполнение нескольких запросов (слияние нескольких конвейеров вместе). Это ограничение можно преодолеть с помощью сопрограмм (coroutines) или просто запуском дополнительных потоков, которые ждут друг друга. У нас может быть больше возможностей, если мы сделаем поток управления явным: если мы локализуем логику для передачи данных из одной расчетной единицы в другую вне этих расчетных единиц. Читайте эту [статью](http://journal.stuffwithstuff.com/2013/01/13/iteration-inside-and-out/) для углубленного изучения. @@ -110,9 +110,9 @@ ClickHouse - полноценная колоночная СУБД. Данные > Генераторы парсеров не используются по историческим причинам. ## Интерпретаторы {#interpreters} - + Интерпретаторы отвечают за создание конвейера выполнения запроса из `AST`. Есть простые интерпретаторы, такие как `InterpreterExistsQuery` и `InterpreterDropQuery` или более сложный `InterpreterSelectQuery`. Конвейер выполнения запроса представляет собой комбинацию входных и выходных потоков блоков. Например, результатом интерпретации `SELECT` запроса является `IBlockInputStream` для чтения результирующего набора данных; результат интерпретации `INSERT` запроса - это `IBlockOutputStream`, для записи данных, предназначенных для вставки; результат интерпретации `INSERT SELECT` запроса - это `IBlockInputStream`, который возвращает пустой результирующий набор при первом чтении, но копирует данные из `SELECT` к `INSERT`. - + `InterpreterSelectQuery` использует `ExpressionAnalyzer` и `ExpressionActions` механизмы для анализа запросов и преобразований. Именно здесь выполняется большинство оптимизаций запросов на основе правил. `ExpressionAnalyzer` написан довольно грязно и должен быть переписан: различные преобразования запросов и оптимизации должны быть извлечены в отдельные классы, чтобы позволить модульные преобразования или запросы. ## Функции {#functions} @@ -162,9 +162,9 @@ ClickHouse имеет сильную типизацию, поэтому нет Сервера в кластере в основном независимы. Вы можете создать `Распределенную` (`Distributed`) таблицу на одном или всех серверах в кластере. Такая таблица сама по себе не хранит данные - она только предоставляет возможность "просмотра" всех локальных таблиц на нескольких узлах кластера. При выполнении `SELECT` распределенная таблица переписывает запрос, выбирает удаленные узлы в соответствии с настройками балансировки нагрузки и отправляет им запрос. Распределенная таблица просит удаленные сервера обработать запрос до той стадии, когда промежуточные результаты с разных серверов могут быть объединены. Затем он получает промежуточные результаты и объединяет их. Распределенная таблица пытается возложить как можно больше работы на удаленные серверы и сократить объем промежуточных данных, передаваемых по сети. 
-Ситуация усложняется, при использовании подзапросы в случае IN или JOIN, когда каждый из них использует таблицу `Distributed`. Есть разные стратегии для выполнения таких запросов.
+Ситуация усложняется при использовании подзапросов в случае `IN` или `JOIN`, когда каждый из них использует таблицу `Distributed`. Есть разные стратегии для выполнения таких запросов.

-Глобального плана выполнения распределенных запросов не существует. Каждый узел имеет собственный локальный план для своей части работы. У нас есть простое однонаправленное выполнение распределенных запросов: мы отправляем запросы на удаленные узлы и затем объединяем результаты. Но это невозможно для сложных запросов GROUP BY высокой кардинальности или запросов с большим числом временных данных в JOIN: в таких случаях нам необходимо перераспределить («reshuffle») данные между серверами, что требует дополнительной координации. ClickHouse не поддерживает выполнение запросов такого рода, и нам нужно работать над этим.
+Глобального плана выполнения распределенных запросов не существует. Каждый узел имеет собственный локальный план для своей части работы. У нас есть простое однонаправленное выполнение распределенных запросов: мы отправляем запросы на удаленные узлы и затем объединяем результаты. Но это невозможно для сложных запросов `GROUP BY` высокой кардинальности или запросов с большим числом временных данных в `JOIN`: в таких случаях нам необходимо перераспределить («reshuffle») данные между серверами, что требует дополнительной координации. ClickHouse не поддерживает выполнение запросов такого рода, и нам нужно работать над этим.

## Merge Tree {#merge-tree}

@@ -190,7 +190,7 @@ ClickHouse имеет сильную типизацию, поэтому нет

Репликация использует асинхронную multi-master схему. Вы можете вставить данные в любую реплику, которая имеет открытую сессию в `ZooKeeper`, и данные реплицируются на все другие реплики асинхронно. Поскольку ClickHouse не поддерживает UPDATE, репликация исключает конфликты (conflict-free replication). Поскольку подтверждение вставок кворумом не реализовано, только что вставленные данные могут быть потеряны в случае сбоя одного узла.

-Метаданные для репликации хранятся в `ZooKeeper`. Существует журнал репликации, в котором перечислены действия, которые необходимо выполнить. Среди этих действий: получить часть (get the part); объединить части (merge parts); удалить партицию (drop a partition) и так далее. Каждая реплика копирует журнал репликации в свою очередь, а затем выполняет действия из очереди. Например, при вставке в журнале создается действие «получить часть» (get the part), и каждая реплика загружает эту часть. Слияния координируются между репликами, чтобы получить идентичные до байта результаты. Все части объединяются одинаково на всех репликах. Одна из реплик-лидеров инициирует новое слияние кусков первой и записывает действия «слияния частей» в журнал. Несколько реплик (или все) могут быть лидерами одновременно. Реплике можно запретить быть лидером с помощью `merge_tree` настройки `replicated_can_become_leader`.
+Метаданные для репликации хранятся в `ZooKeeper`. Существует журнал репликации, в котором перечислены действия, которые необходимо выполнить. Среди этих действий: получить часть (get the part); объединить части (merge parts); удалить партицию (drop a partition) и так далее. Каждая реплика копирует журнал репликации в свою очередь, а затем выполняет действия из очереди. Например, при вставке в журнале создается действие «получить часть» (get the part), и каждая реплика загружает эту часть.
Слияния координируются между репликами, чтобы получить идентичные до байта результаты. Все части объединяются одинаково на всех репликах. Одна из реплик-лидеров инициирует новое слияние кусков первой и записывает действия «слияния частей» в журнал. Несколько реплик (или все) могут быть лидерами одновременно. Реплике можно запретить быть лидером с помощью `merge_tree` настройки `replicated_can_become_leader`. Репликация является физической: между узлами передаются только сжатые части, а не запросы. Слияния обрабатываются на каждой реплике независимо, в большинстве случаев, чтобы снизить затраты на сеть, во избежание усиления роли сети. Крупные объединенные части отправляются по сети только в случае значительной задержки репликации. diff --git a/docs/ru/development/developer-instruction.md b/docs/ru/development/developer-instruction.md index 9ddb17b7212..463d38a44fb 100644 --- a/docs/ru/development/developer-instruction.md +++ b/docs/ru/development/developer-instruction.md @@ -7,15 +7,15 @@ toc_title: "Инструкция для разработчиков" Сборка ClickHouse поддерживается на Linux, FreeBSD, Mac OS X. -# Если вы используете Windows {#esli-vy-ispolzuete-windows} +## Если вы используете Windows {#esli-vy-ispolzuete-windows} Если вы используете Windows, вам потребуется создать виртуальную машину с Ubuntu. Для работы с виртуальной машиной, установите VirtualBox. Скачать Ubuntu можно на сайте: https://www.ubuntu.com/#download Создайте виртуальную машину из полученного образа. Выделите для неё не менее 4 GB оперативной памяти. Для запуска терминала в Ubuntu, найдите в меню программу со словом terminal (gnome-terminal, konsole или что-то в этом роде) или нажмите Ctrl+Alt+T. -# Если вы используете 32-битную систему {#esli-vy-ispolzuete-32-bitnuiu-sistemu} +## Если вы используете 32-битную систему {#esli-vy-ispolzuete-32-bitnuiu-sistemu} ClickHouse не работает и не собирается на 32-битных системах. Получите доступ к 64-битной системе и продолжайте. -# Создание репозитория на GitHub {#sozdanie-repozitoriia-na-github} +## Создание репозитория на GitHub {#sozdanie-repozitoriia-na-github} Для работы с репозиторием ClickHouse, вам потребуется аккаунт на GitHub. Наверное, он у вас уже есть. @@ -34,7 +34,7 @@ ClickHouse не работает и не собирается на 32-битны Подробное руководство по использованию Git: https://git-scm.com/book/ru/v2 -# Клонирование репозитория на рабочую машину {#klonirovanie-repozitoriia-na-rabochuiu-mashinu} +## Клонирование репозитория на рабочую машину {#klonirovanie-repozitoriia-na-rabochuiu-mashinu} Затем вам потребуется загрузить исходники для работы на свой компьютер. Это называется «клонирование репозитория», потому что создаёт на вашем компьютере локальную копию репозитория, с которой вы будете работать. @@ -78,7 +78,7 @@ ClickHouse не работает и не собирается на 32-битны После этого, вы сможете добавлять в свой репозиторий обновления из репозитория Яндекса с помощью команды `git pull upstream master`. -## Работа с сабмодулями Git {#rabota-s-sabmoduliami-git} +### Работа с сабмодулями Git {#rabota-s-sabmoduliami-git} Работа с сабмодулями git может быть достаточно болезненной. Следующие команды позволят содержать их в порядке: @@ -110,7 +110,7 @@ The next commands would help you to reset all submodules to the initial state (! git submodule foreach git submodule foreach git reset --hard git submodule foreach git submodule foreach git clean -xfd -# Система сборки {#sistema-sborki} +## Система сборки {#sistema-sborki} ClickHouse использует систему сборки CMake и Ninja. 
@@ -130,25 +130,25 @@ Ninja - система запуска сборочных задач.

Проверьте версию CMake: `cmake --version`. Если версия меньше 3.3, то установите новую версию с сайта https://cmake.org/download/

-# Необязательные внешние библиотеки {#neobiazatelnye-vneshnie-biblioteki}
+## Необязательные внешние библиотеки {#neobiazatelnye-vneshnie-biblioteki}

ClickHouse использует для сборки некоторое количество внешних библиотек. Но ни одну из них не требуется отдельно устанавливать, так как они собираются вместе с ClickHouse, из исходников, которые расположены в submodules. Посмотреть набор этих библиотек можно в директории contrib.

-# Компилятор C++ {#kompiliator-c}
+## Компилятор C++ {#kompiliator-c}

-В качестве компилятора C++ поддерживается GCC начиная с версии 9 или Clang начиная с версии 8.
+В качестве компилятора C++ поддерживается Clang начиная с версии 11.

-Официальные сборки от Яндекса, на данный момент, используют GCC, так как он генерирует слегка более производительный машинный код (разница в среднем до нескольких процентов по нашим бенчмаркам). Clang обычно более удобен для разработки. Впрочем, наша среда continuous integration проверяет около десятка вариантов сборки.
+Впрочем, наша среда continuous integration проверяет около десятка вариантов сборки, включая gcc, но сборка с помощью gcc непригодна для использования в продакшене.

-Для установки GCC под Ubuntu, выполните: `sudo apt install gcc g++`.
+В Ubuntu/Debian можно использовать скрипт автоматической установки (см. [официальный сайт](https://apt.llvm.org/)):

-Проверьте версию gcc: `gcc --version`. Если версия меньше 10, то следуйте инструкции: https://clickhouse.tech/docs/ru/development/build/#install-gcc-10.
+```bash
+sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
+```

Сборка под Mac OS X поддерживается только для компилятора Clang. Чтобы установить его, выполните `brew install llvm`

-Если вы решили использовать Clang, вы также можете установить `libc++` и `lld`, если вы знаете, что это такое. При желании, установите `ccache`.
-
-# Процесс сборки {#protsess-sborki}
+## Процесс сборки {#protsess-sborki}

Теперь вы готовы к сборке ClickHouse. Для размещения собранных файлов, рекомендуется создать отдельную директорию build внутри директории ClickHouse:

@@ -158,14 +158,7 @@ ClickHouse использует для сборки некоторое коли

Вы можете иметь несколько разных директорий (build_release, build_debug) для разных вариантов сборки. Находясь в директории build, выполните конфигурацию сборки с помощью CMake.
-Перед первым запуском необходимо выставить переменные окружения, отвечающие за выбор компилятора (в данном примере это - gcc версии 9).
-
-Linux:
-
-    export CC=gcc-10 CXX=g++-10
-    cmake ..
-
-Mac OS X:
+Перед первым запуском необходимо выставить переменные окружения, отвечающие за выбор компилятора.

    export CC=clang CXX=clang++
    cmake ..
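+
+Затем запустите сборку. Минимальный набросок (предполагается, что конфигурация командой `cmake ..` уже выполнена, а цели `clickhouse-server` и `clickhouse-client` доступны в сборочном дереве):
+
+    ninja clickhouse-server clickhouse-client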
@@ -206,7 +199,7 @@ Mac OS X: ls -l programs/clickhouse -# Запуск собранной версии ClickHouse {#zapusk-sobrannoi-versii-clickhouse} +## Запуск собранной версии ClickHouse {#zapusk-sobrannoi-versii-clickhouse} Для запуска сервера из под текущего пользователя, с выводом логов в терминал и с использованием примеров конфигурационных файлов, расположенных в исходниках, перейдите в директорию `ClickHouse/programs/server/` (эта директория находится не в директории build) и выполните: @@ -233,7 +226,7 @@ Mac OS X: sudo service clickhouse-server stop sudo -u clickhouse ClickHouse/build/programs/clickhouse server --config-file /etc/clickhouse-server/config.xml -# Среда разработки {#sreda-razrabotki} +## Среда разработки {#sreda-razrabotki} Если вы не знаете, какую среду разработки использовать, то рекомендуется использовать CLion. CLion является платным ПО, но его можно использовать бесплатно в течение пробного периода. Также он бесплатен для учащихся. CLion можно использовать как под Linux, так и под Mac OS X. @@ -243,7 +236,7 @@ Mac OS X: На всякий случай заметим, что CLion самостоятельно создаёт свою build директорию, самостоятельно выбирает тип сборки debug по-умолчанию, для конфигурации использует встроенную в CLion версию CMake вместо установленного вами, а для запуска задач использует make вместо ninja. Это нормально, просто имейте это ввиду, чтобы не возникало путаницы. -# Написание кода {#napisanie-koda} +## Написание кода {#napisanie-koda} Описание архитектуры ClickHouse: https://clickhouse.tech/docs/ru/development/architecture/ @@ -253,7 +246,7 @@ Mac OS X: Список задач: https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3A%22easy+task%22 -# Тестовые данные {#testovye-dannye} +## Тестовые данные {#testovye-dannye} Разработка ClickHouse часто требует загрузки реалистичных наборов данных. Особенно это важно для тестирования производительности. Специально для вас мы подготовили набор данных, представляющий собой анонимизированные данные Яндекс.Метрики. Загрузка этих данных потребует ещё 3 GB места на диске. Для выполнения большинства задач разработки, загружать эти данные не обязательно. @@ -274,7 +267,7 @@ Mac OS X: clickhouse-client --max_insert_block_size 100000 --query "INSERT INTO test.hits FORMAT TSV" < hits_v1.tsv clickhouse-client --max_insert_block_size 100000 --query "INSERT INTO test.visits FORMAT TSV" < visits_v1.tsv -# Создание Pull Request {#sozdanie-pull-request} +## Создание Pull Request {#sozdanie-pull-request} Откройте свой форк репозитория в интерфейсе GitHub. Если вы вели разработку в бранче, выберите этот бранч. На странице будет доступна кнопка «Pull request». По сути, это означает «создать заявку на принятие моих изменений в основной репозиторий». diff --git a/docs/ru/development/style.md b/docs/ru/development/style.md index f08ecc3c4c7..de29e629ceb 100644 --- a/docs/ru/development/style.md +++ b/docs/ru/development/style.md @@ -747,7 +747,7 @@ The dictionary is configured incorrectly. Есть два основных варианта проверки на такие ошибки: * Исключение с кодом `LOGICAL_ERROR`. Его можно использовать для важных проверок, которые делаются в том числе в релизной сборке. -* `assert`. Такие условия не проверяются в релизной сборке, можно использовать для тяжёлых и опциональных проверок. +* `assert`. Такие условия не проверяются в релизной сборке, можно использовать для тяжёлых и опциональных проверок. Пример сообщения, у которого должен быть код `LOGICAL_ERROR`: `Block header is inconsistent with Chunk in ICompicatedProcessor::munge(). 
It is a bug!`

@@ -780,7 +780,7 @@ The dictionary is configured incorrectly.

**2.** Язык - C++20 (см. список доступных [C++20 фич](https://en.cppreference.com/w/cpp/compiler_support#C.2B.2B20_features)).

-**3.** Компилятор - `gcc`. На данный момент (август 2020), код собирается версией 9.3. (Также код может быть собран `clang` версий 10 и 9)
+**3.** Компилятор - `clang`. На данный момент (апрель 2021) код собирается версией 11. (Также код может быть собран `gcc` версии 10, но такая сборка не тестируется и непригодна для продакшена).

Используется стандартная библиотека (реализация `libc++`).
diff --git a/docs/ru/engines/database-engines/atomic.md b/docs/ru/engines/database-engines/atomic.md
index a371301fd2e..8c75be3d93b 100644
--- a/docs/ru/engines/database-engines/atomic.md
+++ b/docs/ru/engines/database-engines/atomic.md
@@ -3,15 +3,52 @@ toc_priority: 32
toc_title: Atomic
---

-
# Atomic {#atomic}

-Поддерживает неблокирующие запросы `DROP` и `RENAME TABLE` и запросы `EXCHANGE TABLES t1 AND t2`. Движок `Atomic` используется по умолчанию.
+Поддерживает неблокирующие запросы [DROP TABLE](#drop-detach-table) и [RENAME TABLE](#rename-table), а также атомарные запросы [EXCHANGE TABLES t1 AND t2](#exchange-tables). Движок `Atomic` используется по умолчанию.

## Создание БД {#creating-a-database}

-```sql
-CREATE DATABASE test ENGINE = Atomic;
+``` sql
+CREATE DATABASE test [ENGINE = Atomic];
```

-[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/database-engines/atomic/)
+## Особенности и рекомендации {#specifics-and-recommendations}
+
+### UUID {#table-uuid}
+
+Каждая таблица в базе данных `Atomic` имеет уникальный [UUID](../../sql-reference/data-types/uuid.md) и хранит данные в папке `/clickhouse_path/store/xxx/xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy/`, где `xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy` - это UUID таблицы.
+Обычно UUID генерируется автоматически, но пользователь также может явно указать UUID в момент создания таблицы (однако это не рекомендуется). Для отображения UUID в запросе `SHOW CREATE` вы можете использовать настройку [show_table_uuid_in_table_create_query_if_not_nil](../../operations/settings/settings.md#show_table_uuid_in_table_create_query_if_not_nil). Результат выполнения в таком случае будет иметь вид:
+
+```sql
+CREATE TABLE name UUID '28f1c61c-2970-457a-bffe-454156ddcfef' (n UInt64) ENGINE = ...;
+```
+
+### RENAME TABLE {#rename-table}
+
+Запросы `RENAME` выполняются без изменения UUID и перемещения табличных данных. Эти запросы не ожидают завершения использующих таблицу запросов и будут выполнены мгновенно.
+
+### DROP/DETACH TABLE {#drop-detach-table}
+
+При выполнении запроса `DROP TABLE` никакие данные не удаляются. Таблица помечается как удаленная, метаданные перемещаются в папку `/clickhouse_path/metadata_dropped/`, и база данных уведомляет фоновый поток. Задержка перед окончательным удалением данных задается настройкой [database_atomic_delay_before_drop_table_sec](../../operations/server-configuration-parameters/settings.md#database_atomic_delay_before_drop_table_sec).
+Вы можете задать синхронный режим, используя модификатор `SYNC`. Используйте для этого настройку [database_atomic_wait_for_drop_and_detach_synchronously](../../operations/settings/settings.md#database_atomic_wait_for_drop_and_detach_synchronously). В этом случае запрос `DROP` ждет завершения `SELECT`, `INSERT` и других запросов, которые используют таблицу. Таблица будет фактически удалена, когда она не будет использоваться.
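+
+Например, минимальный набросок (имя таблицы условное):
+
+``` sql
+-- Запрос с модификатором SYNC вернет управление только после
+-- фактического удаления данных таблицы.
+DROP TABLE IF EXISTS test.demo_table SYNC;
+```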
+
+### EXCHANGE TABLES {#exchange-tables}
+
+Запрос `EXCHANGE` меняет местами две таблицы атомарно. Вместо неатомарной операции:
+
+```sql
+RENAME TABLE new_table TO tmp, old_table TO new_table, tmp TO old_table;
+```
+вы можете использовать один атомарный запрос:
+
+``` sql
+EXCHANGE TABLES new_table AND old_table;
+```
+
+### ReplicatedMergeTree in Atomic Database {#replicatedmergetree-in-atomic-database}
+
+Для таблиц [ReplicatedMergeTree](../table-engines/mergetree-family/replication.md#table_engines-replication) рекомендуется не указывать параметры движка - путь в ZooKeeper и имя реплики. В этом случае будут использоваться параметры конфигурации: [default_replica_path](../../operations/server-configuration-parameters/settings.md#default_replica_path) и [default_replica_name](../../operations/server-configuration-parameters/settings.md#default_replica_name). Если вы хотите определить параметры движка явно, рекомендуется использовать макрос `{uuid}`. Это удобно, так как автоматически генерируются уникальные пути для каждой таблицы в ZooKeeper.
+
+## Смотрите также
+
+- Системная таблица [system.databases](../../operations/system-tables/databases.md).
diff --git a/docs/ru/engines/database-engines/index.md b/docs/ru/engines/database-engines/index.md
index ec92edd2888..d4fad8f43a9 100644
--- a/docs/ru/engines/database-engines/index.md
+++ b/docs/ru/engines/database-engines/index.md
@@ -4,7 +4,7 @@ toc_priority: 27
toc_title: "Введение"
---

-# Движки баз данных {#dvizhki-baz-dannykh}
+# Движки баз данных {#database-engines}

Движки баз данных обеспечивают работу с таблицами.

@@ -18,3 +18,5 @@

- [Lazy](../../engines/database-engines/lazy.md)

+- [PostgreSQL](../../engines/database-engines/postgresql.md)
+
diff --git a/docs/ru/engines/database-engines/postgresql.md b/docs/ru/engines/database-engines/postgresql.md
new file mode 100644
index 00000000000..c11dab6f1aa
--- /dev/null
+++ b/docs/ru/engines/database-engines/postgresql.md
@@ -0,0 +1,138 @@
+---
+toc_priority: 35
+toc_title: PostgreSQL
+---
+
+# PostgreSQL {#postgresql}
+
+Позволяет подключаться к БД на удаленном сервере [PostgreSQL](https://www.postgresql.org). Поддерживает операции чтения и записи (запросы `SELECT` и `INSERT`) для обмена данными между ClickHouse и PostgreSQL.
+
+Позволяет в реальном времени получать от удаленного сервера PostgreSQL информацию о таблицах БД и их структуре с помощью запросов `SHOW TABLES` и `DESCRIBE TABLE`.
+
+Поддерживает операции изменения структуры таблиц (`ALTER TABLE ... ADD|DROP COLUMN`). Если параметр `use_table_cache` (см. ниже раздел «Параметры движка») установлен в значение `1`, структура таблицы кешируется, и изменения в структуре не отслеживаются, но структура будет обновлена после выполнения команд `DETACH` и `ATTACH`.
+
+## Создание БД {#creating-a-database}
+
+``` sql
+CREATE DATABASE test_database
+ENGINE = PostgreSQL('host:port', 'database', 'user', 'password'[, `use_table_cache`]);
+```
+
+**Параметры движка**
+
+- `host:port` — адрес сервера PostgreSQL.
+- `database` — имя удаленной БД.
+- `user` — пользователь PostgreSQL.
+- `password` — пароль пользователя.
+- `use_table_cache` — определяет кеширование структуры таблиц БД. Необязательный параметр. Значение по умолчанию: `0`.
+
+## Поддерживаемые типы данных {#data_types-support}
+
+| PostgreSQL       | ClickHouse                                                    |
+|------------------|---------------------------------------------------------------|
+| DATE             | [Date](../../sql-reference/data-types/date.md)                |
+| TIMESTAMP        | [DateTime](../../sql-reference/data-types/datetime.md)        |
+| REAL             | [Float32](../../sql-reference/data-types/float.md)            |
+| DOUBLE           | [Float64](../../sql-reference/data-types/float.md)            |
+| DECIMAL, NUMERIC | [Decimal](../../sql-reference/data-types/decimal.md)          |
+| SMALLINT         | [Int16](../../sql-reference/data-types/int-uint.md)           |
+| INTEGER          | [Int32](../../sql-reference/data-types/int-uint.md)           |
+| BIGINT           | [Int64](../../sql-reference/data-types/int-uint.md)           |
+| SERIAL           | [UInt32](../../sql-reference/data-types/int-uint.md)          |
+| BIGSERIAL        | [UInt64](../../sql-reference/data-types/int-uint.md)          |
+| TEXT, CHAR       | [String](../../sql-reference/data-types/string.md)            |
+| INTEGER          | Nullable([Int32](../../sql-reference/data-types/int-uint.md)) |
+| ARRAY            | [Array](../../sql-reference/data-types/array.md)              |
+
+## Примеры использования {#examples-of-use}
+
+Обмен данными между БД ClickHouse и сервером PostgreSQL:
+
+``` sql
+CREATE DATABASE test_database
+ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword', 1);
+```
+
+``` sql
+SHOW DATABASES;
+```
+
+``` text
+┌─name──────────┐
+│ default       │
+│ test_database │
+│ system        │
+└───────────────┘
+```
+
+``` sql
+SHOW TABLES FROM test_database;
+```
+
+``` text
+┌─name───────┐
+│ test_table │
+└────────────┘
+```
+
+Чтение данных из таблицы PostgreSQL:
+
+``` sql
+SELECT * FROM test_database.test_table;
+```
+
+``` text
+┌─id─┬─value─┐
+│  1 │     2 │
+└────┴───────┘
+```
+
+Запись данных в таблицу PostgreSQL:
+
+``` sql
+INSERT INTO test_database.test_table VALUES (3,4);
+SELECT * FROM test_database.test_table;
+```
+
+``` text
+┌─id─┬─value─┐
+│  1 │     2 │
+│  3 │     4 │
+└────┴───────┘
+```
+
+Пусть структура таблицы была изменена в PostgreSQL:
+
+``` sql
+postgres=# ALTER TABLE test_table ADD COLUMN data Text
+```
+
+Поскольку при создании БД параметр `use_table_cache` был установлен в значение `1`, структура таблицы в ClickHouse была кеширована и поэтому не изменилась:
+
+``` sql
+DESCRIBE TABLE test_database.test_table;
+```
+``` text
+┌─name──┬─type──────────────┐
+│ id    │ Nullable(Integer) │
+│ value │ Nullable(Integer) │
+└───────┴───────────────────┘
+```
+
+После того как таблицу «отцепили» и затем снова «прицепили», структура обновилась:
+
+``` sql
+DETACH TABLE test_database.test_table;
+ATTACH TABLE test_database.test_table;
+DESCRIBE TABLE test_database.test_table;
+```
+``` text
+┌─name──┬─type──────────────┐
+│ id    │ Nullable(Integer) │
+│ value │ Nullable(Integer) │
+│ data  │ Nullable(String)  │
+└───────┴───────────────────┘
+```
+
+[Оригинальная статья](https://clickhouse.tech/docs/ru/database-engines/postgresql/)
diff --git a/docs/ru/engines/table-engines/index.md b/docs/ru/engines/table-engines/index.md
index 6c11011a307..b17b2124250 100644
--- a/docs/ru/engines/table-engines/index.md
+++ b/docs/ru/engines/table-engines/index.md
@@ -16,7 +16,7 @@ toc_title: "Введение"

- Возможно ли многопоточное выполнение запроса.
- Параметры репликации данных.
-## Семейства движков {#semeistva-dvizhkov} +## Семейства движков {#engine-families} ### MergeTree {#mergetree} @@ -42,7 +42,7 @@ toc_title: "Введение" - [StripeLog](log-family/stripelog.md#stripelog) - [Log](log-family/log.md#log) -### Движки для интеграции {#dvizhki-dlia-integratsii} +### Движки для интеграции {#integration-engines} Движки для связи с другими системами хранения и обработки данных. @@ -52,9 +52,22 @@ toc_title: "Введение" - [MySQL](integrations/mysql.md#mysql) - [ODBC](integrations/odbc.md#table-engine-odbc) - [JDBC](integrations/jdbc.md#table-engine-jdbc) +- [S3](integrations/s3.md#table-engine-s3) ### Специальные движки {#spetsialnye-dvizhki} +- [ODBC](../../engines/table-engines/integrations/odbc.md) +- [JDBC](../../engines/table-engines/integrations/jdbc.md) +- [MySQL](../../engines/table-engines/integrations/mysql.md) +- [MongoDB](../../engines/table-engines/integrations/mongodb.md) +- [HDFS](../../engines/table-engines/integrations/hdfs.md) +- [Kafka](../../engines/table-engines/integrations/kafka.md) +- [EmbeddedRocksDB](../../engines/table-engines/integrations/embedded-rocksdb.md) +- [RabbitMQ](../../engines/table-engines/integrations/rabbitmq.md) +- [PostgreSQL](../../engines/table-engines/integrations/postgresql.md) + +### Специальные движки {#special-engines} + Движки семейства: - [Distributed](special/distributed.md#distributed) @@ -79,4 +92,3 @@ toc_title: "Введение" Чтобы получить данные из виртуального столбца, необходимо указать его название в запросе `SELECT`. `SELECT *` не отображает данные из виртуальных столбцов. При создании таблицы со столбцом, имя которого совпадает с именем одного из виртуальных столбцов таблицы, виртуальный столбец становится недоступным. Не делайте так. Чтобы помочь избежать конфликтов, имена виртуальных столбцов обычно предваряются подчеркиванием. - diff --git a/docs/ru/engines/table-engines/integrations/embedded-rocksdb.md b/docs/ru/engines/table-engines/integrations/embedded-rocksdb.md index f66e789a392..5a7909f63b2 100644 --- a/docs/ru/engines/table-engines/integrations/embedded-rocksdb.md +++ b/docs/ru/engines/table-engines/integrations/embedded-rocksdb.md @@ -1,5 +1,5 @@ --- -toc_priority: 6 +toc_priority: 9 toc_title: EmbeddedRocksDB --- diff --git a/docs/ru/engines/table-engines/integrations/hdfs.md b/docs/ru/engines/table-engines/integrations/hdfs.md index 3d9cb388a01..c96ac12cd2a 100644 --- a/docs/ru/engines/table-engines/integrations/hdfs.md +++ b/docs/ru/engines/table-engines/integrations/hdfs.md @@ -1,5 +1,5 @@ --- -toc_priority: 4 +toc_priority: 6 toc_title: HDFS --- @@ -183,7 +183,7 @@ CREATE TABLE big_table (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9 #### Ограничения {#limitations} * hadoop\_security\_kerberos\_ticket\_cache\_path могут быть определены только на глобальном уровне -## Поддержика Kerberos {#kerberos-support} +## Поддержка Kerberos {#kerberos-support} Если hadoop\_security\_authentication параметр имеет значение 'kerberos', ClickHouse аутентифицируется с помощью Kerberos. [Расширенные параметры](#clickhouse-extras) и hadoop\_security\_kerberos\_ticket\_cache\_path помогают сделать это. 
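+
+Минимальный набросок такой конфигурации (имена параметров взяты из текста выше; значение пути к кешу билетов условное):
+
+``` xml
+<hdfs>
+    <!-- Включить аутентификацию Kerberos для HDFS -->
+    <hadoop_security_authentication>kerberos</hadoop_security_authentication>
+    <!-- Путь к кешу билетов Kerberos; задается только на глобальном уровне -->
+    <hadoop_security_kerberos_ticket_cache_path>/tmp/krb5cc_clickhouse</hadoop_security_kerberos_ticket_cache_path>
+</hdfs>
+```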
diff --git a/docs/ru/engines/table-engines/integrations/jdbc.md b/docs/ru/engines/table-engines/integrations/jdbc.md
index e2db6fac0b2..fd7411a258e 100644
--- a/docs/ru/engines/table-engines/integrations/jdbc.md
+++ b/docs/ru/engines/table-engines/integrations/jdbc.md
@@ -1,5 +1,5 @@
---
-toc_priority: 2
+toc_priority: 3
toc_title: JDBC
---
diff --git a/docs/ru/engines/table-engines/integrations/kafka.md b/docs/ru/engines/table-engines/integrations/kafka.md
index f053b80aebd..19e2850dd51 100644
--- a/docs/ru/engines/table-engines/integrations/kafka.md
+++ b/docs/ru/engines/table-engines/integrations/kafka.md
@@ -1,5 +1,5 @@
---
-toc_priority: 5
+toc_priority: 8
toc_title: Kafka
---
diff --git a/docs/ru/engines/table-engines/integrations/mongodb.md b/docs/ru/engines/table-engines/integrations/mongodb.md
index 5ab63494648..97f903bdf89 100644
--- a/docs/ru/engines/table-engines/integrations/mongodb.md
+++ b/docs/ru/engines/table-engines/integrations/mongodb.md
@@ -1,5 +1,5 @@
---
-toc_priority: 7
+toc_priority: 5
toc_title: MongoDB
---
diff --git a/docs/ru/engines/table-engines/integrations/mysql.md b/docs/ru/engines/table-engines/integrations/mysql.md
index 9152a57d122..5011c8a93c6 100644
--- a/docs/ru/engines/table-engines/integrations/mysql.md
+++ b/docs/ru/engines/table-engines/integrations/mysql.md
@@ -1,5 +1,5 @@
---
-toc_priority: 3
+toc_priority: 4
toc_title: MySQL
---
diff --git a/docs/ru/engines/table-engines/integrations/odbc.md b/docs/ru/engines/table-engines/integrations/odbc.md
index b2faa9b1e9e..669977ff531 100644
--- a/docs/ru/engines/table-engines/integrations/odbc.md
+++ b/docs/ru/engines/table-engines/integrations/odbc.md
@@ -1,5 +1,5 @@
---
-toc_priority: 1
+toc_priority: 2
toc_title: ODBC
---
diff --git a/docs/ru/engines/table-engines/integrations/postgresql.md b/docs/ru/engines/table-engines/integrations/postgresql.md
index ecf431830f8..cb8e38ae5c9 100644
--- a/docs/ru/engines/table-engines/integrations/postgresql.md
+++ b/docs/ru/engines/table-engines/integrations/postgresql.md
@@ -1,11 +1,11 @@
---
-toc_priority: 8
+toc_priority: 11
toc_title: PostgreSQL
---

-# PosgtreSQL {#postgresql}
+# PostgreSQL {#postgresql}

-Движок PostgreSQL позволяет выполнять запросы `SELECT` над данными, хранящимися на удалённом PostgreSQL сервере.
+Движок PostgreSQL позволяет выполнять запросы `SELECT` и `INSERT` для таблиц на удаленном сервере PostgreSQL.

## Создание таблицы {#creating-a-table}

@@ -15,39 +15,65 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
    ...
-) ENGINE = PostgreSQL('host:port', 'database', 'table', 'user', 'password');
+) ENGINE = PostgreSQL('host:port', 'database', 'table', 'user', 'password'[, `schema`]);
```

Смотрите подробное описание запроса [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query).

Структура таблицы может отличаться от исходной структуры таблицы PostgreSQL:

-- Имена столбцов должны быть такими же, как в исходной таблице MySQL, но вы можете использовать только некоторые из этих столбцов и в любом порядке.
+- Имена столбцов должны быть такими же, как в исходной таблице PostgreSQL, но вы можете использовать только некоторые из этих столбцов и в любом порядке.
- Типы столбцов могут отличаться от типов в исходной таблице PostgreSQL. ClickHouse пытается [приводить](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) значения к типам данных ClickHouse.
- Настройка `external_table_functions_use_nulls` определяет, как обрабатывать Nullable столбцы. По умолчанию 1; если 0, табличная функция не будет делать Nullable столбцы и будет вместо NULL выставлять значения по умолчанию для скалярного типа. Это также применимо для значений NULL внутри массивов.

**Параметры движка**

- `host:port` — адрес сервера PostgreSQL.
-
- `database` — Имя базы данных на сервере PostgreSQL.
-
- `table` — Имя таблицы.
-
- `user` — Имя пользователя PostgreSQL.
-
- `password` — Пароль пользователя PostgreSQL.
+- `schema` — имя схемы, если не используется схема по умолчанию. Необязательный аргумент.

-SELECT запросы на стороне PostgreSQL выполняются как `COPY (SELECT ...) TO STDOUT` внутри транзакции PostgreSQL только на чтение с коммитом после каждого `SELECT` запроса.
+## Особенности реализации {#implementation-details}

-Простые условия для `WHERE` такие как `=, !=, >, >=, <, <=, IN` исполняются на стороне PostgreSQL сервера.
+Запросы `SELECT` на стороне PostgreSQL выполняются как `COPY (SELECT ...) TO STDOUT` внутри транзакции PostgreSQL только на чтение с коммитом после каждого запроса `SELECT`.

-Все операции объединения, аггрегации, сортировки, условия `IN [ array ]` и ограничения `LIMIT` выполняются на стороне ClickHouse только после того как запрос к PostgreSQL закончился.
+Простые условия для `WHERE`, такие как `=`, `!=`, `>`, `>=`, `<`, `<=` и `IN`, исполняются на стороне PostgreSQL сервера.

-INSERT запросы на стороне PostgreSQL выполняются как `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` внутри PostgreSQL транзакции с автоматическим коммитом после каждого `INSERT` запроса.
+Все операции объединения, агрегации, сортировки, условия `IN [ array ]` и ограничения `LIMIT` выполняются на стороне ClickHouse только после того, как запрос к PostgreSQL закончился.
+
+Запросы `INSERT` на стороне PostgreSQL выполняются как `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` внутри PostgreSQL транзакции с автоматическим коммитом после каждого запроса `INSERT`.

PostgreSQL массивы конвертируются в массивы ClickHouse.
-Будьте осторожны в PostgreSQL массивы созданные как type_name[], являются многомерными и могут содержать в себе разное количество измерений в разных строках одной таблицы, внутри ClickHouse допустипы только многомерные массивы с одинаковым кол-вом измерений во всех строках таблицы.
+
+!!! info "Внимание"
+    Будьте внимательны: в PostgreSQL массивы, созданные как `type_name[]`, являются многомерными и могут содержать в себе разное количество измерений в разных строках одной таблицы. Внутри ClickHouse допустимы только многомерные массивы с одинаковым количеством измерений во всех строках таблицы.
+
+При использовании словаря PostgreSQL поддерживается приоритет реплик. Чем больше номер реплики, тем ниже ее приоритет. Наивысший приоритет у реплики с номером `0`.
+
+В примере ниже реплика `example01-1` имеет более высокий приоритет:
+
+```xml
+<postgresql>
+    <port>5432</port>
+    <user>clickhouse</user>
+    <password>qwerty</password>
+    <replica>
+        <host>example01-1</host>
+        <priority>1</priority>
+    </replica>
+    <replica>
+        <host>example01-2</host>
+        <priority>2</priority>
+    </replica>
+    <db>db_name</db>
+    <table>table_name</table>
+    <where>id=10</where>
+    <invalidate_query>SQL_QUERY</invalidate_query>
+</postgresql>
+```

## Пример использования {#usage-example}

Таблица в PostgreSQL:

``` text
postgres=# CREATE TABLE "public"."test" (
"int_id" SERIAL,
"int_nullable" INT NULL DEFAULT NULL,
"float" FLOAT NOT NULL,
"str" VARCHAR(100) NOT NULL DEFAULT '',
"float_nullable" FLOAT NULL DEFAULT NULL,
PRIMARY KEY (int_id));

CREATE TABLE

-postgres=# insert into test (int_id, str, "float") VALUES (1,'test',2);
+postgres=# INSERT INTO test (int_id, str, "float") VALUES (1,'test',2);
INSERT 0 1

-postgresql> select * from test;
- int_id | int_nullable | float | str  | float_nullable
---------+--------------+-------+------+----------------
-      1 |              |     2 | test |
-(1 row)
+postgresql> SELECT * FROM test;
+ int_id | int_nullable | float | str  | float_nullable
+ --------+--------------+-------+------+----------------
+      1 |              |     2 | test |
+ (1 row)
```

-Таблица в ClickHouse, получение данных из PostgreSQL таблицы созданной выше:
+Таблица в ClickHouse, получение данных из PostgreSQL таблицы, созданной выше:

``` sql
CREATE TABLE default.postgresql_table
(
    `float_nullable` Nullable(Float32),
    `str` String,
    `int_id` Int32
)
ENGINE = PostgreSQL('localhost:5432', 'public', 'test', 'postges_user', 'postgre');
```

``` sql
-SELECT * FROM postgresql_table WHERE str IN ('test')
+SELECT * FROM postgresql_table WHERE str IN ('test');
```

``` text
┌─float_nullable─┬─str──┬─int_id─┐
│           ᴺᵁᴸᴸ │ test │      1 │
└────────────────┴──────┴────────┘
-1 rows in set. Elapsed: 0.019 sec.
```

+Использование схемы, отличной от схемы по умолчанию:

-## Смотри также {#see-also}
+```text
+postgres=# CREATE SCHEMA "nice.schema";

-- [Табличная функция ‘postgresql’](../../../sql-reference/table-functions/postgresql.md)
-- [Использование PostgreSQL в качестве истояника для внешнего словаря](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)
+postgres=# CREATE TABLE "nice.schema"."nice.table" (a integer);
+postgres=# INSERT INTO "nice.schema"."nice.table" SELECT i FROM generate_series(0, 99) as t(i)
+```
+
+```sql
+CREATE TABLE pg_table_schema_with_dots (a UInt32)
+    ENGINE PostgreSQL('localhost:5432', 'clickhouse', 'nice.table', 'postgresql_user', 'password', 'nice.schema');
+```
+
+**См. также**
+
+- [Табличная функция `postgresql`](../../../sql-reference/table-functions/postgresql.md)
+- [Использование PostgreSQL в качестве источника для внешнего словаря](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)
+
+[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/postgresql/)
diff --git a/docs/ru/engines/table-engines/integrations/s3.md b/docs/ru/engines/table-engines/integrations/s3.md
index fa10e8ebc34..177d69dc3e0 100644
--- a/docs/ru/engines/table-engines/integrations/s3.md
+++ b/docs/ru/engines/table-engines/integrations/s3.md
@@ -11,21 +11,21 @@ toc_title: S3

``` sql
CREATE TABLE s3_engine_table (name String, value UInt32)
-ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, structure, [compression])
+ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression])
```

**Параметры движка**

- `path` — URL-адрес бакета с указанием пути к файлу. Поддерживает следующие подстановочные знаки в режиме "только чтение": `*`, `?`, `{abc,def}` и `{N..M}` где `N`, `M` — числа, `'abc'`, `'def'` — строки. Подробнее смотри [ниже](#wildcards-in-path).
- `format` — [формат](../../../interfaces/formats.md#formats) файла.
-- `structure` — структура таблицы в формате `'column1_name column1_type, column2_name column2_type, ...'`.
-- `compression` — тип сжатия. Возможные значения: none, gzip/gz, brotli/br, xz/LZMA, zstd/zst. Необязательный параметр. Если не указано, то тип сжатия определяется автоматически по расширению файла.
+- `aws_access_key_id`, `aws_secret_access_key` - данные пользователя учетной записи [AWS](https://aws.amazon.com/ru/). Вы можете использовать их для аутентификации ваших запросов. Необязательный параметр. Если параметры учетной записи не указаны, то используются данные из конфигурационного файла. Смотрите подробнее [Использование сервиса S3 для хранения данных](../mergetree-family/mergetree.md#table_engine-mergetree-s3).
+- `compression` — тип сжатия. Возможные значения: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. Необязательный параметр. Если не указано, то тип сжатия определяется автоматически по расширению файла.

**Пример**

``` sql
CREATE TABLE s3_engine_table (name String, value UInt32)
-ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip');
+ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip');
INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3);
SELECT * FROM s3_engine_table LIMIT 2;
```

@@ -73,17 +73,19 @@ SELECT * FROM s3_engine_table LIMIT 2;

Соображение безопасности: если злонамеренный пользователь попробует указать произвольные URL-адреса S3, параметр `s3_max_redirects` должен быть установлен в ноль, чтобы избежать атак [SSRF](https://en.wikipedia.org/wiki/Server-side_request_forgery). Как альтернатива, в конфигурации сервера должен быть указан `remote_host_filter`.

-## Настройки конечных точек {#endpoint-settings}
+## Настройки точки приема запроса {#endpoint-settings}

-Для конечной точки (которая соответствует точному префиксу URL-адреса) в конфигурационном файле могут быть заданы следующие настройки:
+Для точки приема запроса (которая соответствует точному префиксу URL-адреса) в конфигурационном файле могут быть заданы следующие настройки:

Обязательная настройка:
-- `endpoint` — указывает префикс конечной точки.
+- `endpoint` — указывает префикс точки приема запроса.

Необязательные настройки:
-- `access_key_id` и `secret_access_key` — указывают учетные данные для использования с данной конечной точкой.
-- `use_environment_credentials` — если `true`, S3-клиент будет пытаться получить учетные данные из переменных среды и метаданных Amazon EC2 для данной конечной точки. Значение по умолчанию - `false`.
-- `header` — добавляет указанный HTTP-заголовок к запросу на заданную конечную точку. Может быть определен несколько раз.
+- `access_key_id` и `secret_access_key` — указывают учетные данные для использования с данной точкой приема запроса.
+- `use_environment_credentials` — если `true`, S3-клиент будет пытаться получить учетные данные из переменных среды и метаданных [Amazon EC2](https://ru.wikipedia.org/wiki/Amazon_EC2) для данной точки приема запроса. Значение по умолчанию — `false`.
+- `use_insecure_imds_request` — признак использования менее безопасного соединения при выполнении запроса к IMDS при получении учётных данных из метаданных Amazon EC2. Значение по умолчанию — `false`.
+- `region` — название региона S3.
+- `header` — добавляет указанный HTTP-заголовок к запросу на заданную точку приема запроса. Может быть определен несколько раз.
- `server_side_encryption_customer_key_base64` — устанавливает необходимые заголовки для доступа к объектам S3 с шифрованием SSE-C.
**Пример** @@ -94,7 +96,9 @@ SELECT * FROM s3_engine_table LIMIT 2; https://storage.yandexcloud.net/my-test-bucket-768/ + + @@ -103,7 +107,7 @@ SELECT * FROM s3_engine_table LIMIT 2; ## Примеры использования {#usage-examples} -Предположим, у нас есть несколько файлов в формате TSV со следующими URL-адресами в HDFS: +Предположим, у нас есть несколько файлов в формате TSV со следующими URL-адресами в S3: - 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv' - 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv' @@ -133,8 +137,7 @@ CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV'); ``` -!!! warning "Warning" - Если список файлов содержит диапазоны чисел с ведущими нулями, используйте конструкцию с фигурными скобками для каждой цифры отдельно или используйте `?`. +Если список файлов содержит диапазоны чисел с ведущими нулями, используйте конструкцию с фигурными скобками для каждой цифры отдельно или используйте `?`. 4. Создание таблицы из файлов с именами `file-000.csv`, `file-001.csv`, … , `file-999.csv`: @@ -142,9 +145,7 @@ ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_p CREATE TABLE big_table (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV'); ``` + **Смотрите также** - [Табличная функция S3](../../../sql-reference/table-functions/s3.md) - -[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/s3/) - diff --git a/docs/ru/engines/table-engines/mergetree-family/custom-partitioning-key.md b/docs/ru/engines/table-engines/mergetree-family/custom-partitioning-key.md index 9a09618e508..4f0206158f1 100644 --- a/docs/ru/engines/table-engines/mergetree-family/custom-partitioning-key.md +++ b/docs/ru/engines/table-engines/mergetree-family/custom-partitioning-key.md @@ -34,6 +34,8 @@ ORDER BY (CounterID, StartDate, intHash32(UserID)); В этом примере задано партиционирование по типам событий, произошедших в течение текущей недели. +По умолчанию, ключ партиционирования с плавающей запятой не поддерживается. Чтобы использовать его, включите настройку [allow_floating_point_partition_key](../../../operations/settings/merge-tree-settings.md#allow_floating_point_partition_key). + Каждая партиция состоит из отдельных фрагментов или так называемых *кусков данных*. Каждый кусок отсортирован по первичному ключу. При вставке данных в таблицу каждая отдельная запись сохраняется в виде отдельного куска. Через некоторое время после вставки (обычно до 10 минут), ClickHouse выполняет в фоновом режиме слияние данных — в результате куски для одной и той же партиции будут объединены в более крупный кусок. !!! 
info "Info"

diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md b/docs/ru/engines/table-engines/mergetree-family/mergetree.md
index 7d7641a417d..4cff6fcfb80 100644
--- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md
+++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md
@@ -727,6 +727,7 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
 <endpoint>https://storage.yandexcloud.net/my-bucket/root-path/</endpoint>
 <access_key_id>your_access_key_id</access_key_id>
 <secret_access_key>your_secret_access_key</secret_access_key>
+<region></region>
 <proxy>
     <uri>http://proxy1</uri>
     <uri>http://proxy2</uri>
 </proxy>
@@ -753,7 +754,9 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'

Необязательные параметры:

-- `use_environment_credentials` — признак, нужно ли считывать учетные данные AWS из переменных окружения `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` и `AWS_SESSION_TOKEN`, если они есть. Значение по умолчанию: `false`.
+- `region` — название региона S3.
+- `use_environment_credentials` — признак, нужно ли считывать учетные данные AWS из сетевого окружения, а также из переменных окружения `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` и `AWS_SESSION_TOKEN`, если они есть. Значение по умолчанию: `false`.
+- `use_insecure_imds_request` — признак, нужно ли использовать менее безопасное соединение при выполнении запроса к IMDS при получении учётных данных из метаданных Amazon EC2. Значение по умолчанию: `false`.
- `proxy` — конфигурация прокси-сервера для конечной точки S3. Каждый элемент `uri` внутри блока `proxy` должен содержать URL прокси-сервера.
- `connect_timeout_ms` — таймаут подключения к сокету в миллисекундах. Значение по умолчанию: 10 секунд.
- `request_timeout_ms` — таймаут выполнения запроса в миллисекундах. Значение по умолчанию: 5 секунд.
diff --git a/docs/ru/engines/table-engines/mergetree-family/replacingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/replacingmergetree.md
index ec0b339e8c9..ebd7875179d 100644
--- a/docs/ru/engines/table-engines/mergetree-family/replacingmergetree.md
+++ b/docs/ru/engines/table-engines/mergetree-family/replacingmergetree.md
@@ -33,11 +33,11 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

**Параметры ReplacingMergeTree**

-- `ver` — столбец с версией, тип `UInt*`, `Date` или `DateTime`. Необязательный параметр.
+- `ver` — столбец с номером версии. Тип `UInt*`, `Date`, `DateTime` или `DateTime64`. Необязательный параметр.

   При слиянии `ReplacingMergeTree` оставляет только строку для каждого уникального ключа сортировки:

-   - Последнюю в выборке, если `ver` не задан. Под выборкой здесь понимается набор строк в наборе партов, участвующих в слиянии. Последний по времени создания парт (последний инсерт) будет последним в выборке. Таким образом, после дедупликации для каждого значения ключа сортировки останется самая последняя строка из самого последнего инсерта.
+   - Последнюю в выборке, если `ver` не задан. Под выборкой здесь понимается набор строк в наборе кусков данных, участвующих в слиянии. Последний по времени создания кусок (последняя вставка) будет последним в выборке. Таким образом, после дедупликации для каждого значения ключа сортировки останется самая последняя строка из самой последней вставки.

   - С максимальной версией, если `ver` задан.

**Секции запроса**

@@ -62,7 +62,6 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

Все параметры, кроме `ver` имеют то же значение, что в и `MergeTree`.

-- `ver` — столбец с версией. Необязательный параметр. Описание смотрите выше по тексту.
+- `ver` — столбец с номером версии. Необязательный параметр. Описание смотрите выше по тексту.
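Чтобы описанное выше правило дедупликации было нагляднее, небольшой набросок; имена таблицы и столбцов условные:

``` sql
-- Дедупликация по ключу сортировки с учетом столбца ver
CREATE TABLE replacing_demo
(
    key UInt64,
    value String,
    ver UInt32
)
ENGINE = ReplacingMergeTree(ver)
ORDER BY key;

INSERT INTO replacing_demo VALUES (1, 'первая', 1), (1, 'вторая', 2);
INSERT INTO replacing_demo VALUES (1, 'устаревшая', 0);

-- Слияние выполняется в фоне в неопределенный момент;
-- для проверки его можно форсировать вручную:
OPTIMIZE TABLE replacing_demo FINAL;

-- Останется одна строка с максимальным значением ver: (1, 'вторая', 2)
SELECT * FROM replacing_demo;
```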
- diff --git a/docs/ru/engines/table-engines/mergetree-family/replication.md b/docs/ru/engines/table-engines/mergetree-family/replication.md index 848adbee4da..cb92084695a 100644 --- a/docs/ru/engines/table-engines/mergetree-family/replication.md +++ b/docs/ru/engines/table-engines/mergetree-family/replication.md @@ -65,6 +65,8 @@ ClickHouse хранит метаинформацию о репликах в [Apa Репликация асинхронная, мульти-мастер. Запросы `INSERT` и `ALTER` можно направлять на любой доступный сервер. Данные вставятся на сервер, где выполнен запрос, а затем скопируются на остальные серверы. В связи с асинхронностью, только что вставленные данные появляются на остальных репликах с небольшой задержкой. Если часть реплик недоступна, данные на них запишутся тогда, когда они станут доступны. Если реплика доступна, то задержка составляет столько времени, сколько требуется для передачи блока сжатых данных по сети. Количество потоков для выполнения фоновых задач можно задать с помощью настройки [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size). +Движок `ReplicatedMergeTree` использует отдельный пул потоков для скачивания кусков данных. Размер пула ограничен настройкой [background_fetches_pool_size](../../../operations/settings/settings.md#background_fetches_pool_size), которую можно указать при перезапуске сервера. + По умолчанию, запрос INSERT ждёт подтверждения записи только от одной реплики. Если данные были успешно записаны только на одну реплику, и сервер с этой репликой перестал существовать, то записанные данные будут потеряны. Вы можете включить подтверждение записи от нескольких реплик, используя настройку `insert_quorum`. Каждый блок данных записывается атомарно. Запрос INSERT разбивается на блоки данных размером до `max_insert_block_size = 1048576` строк. То есть, если в запросе `INSERT` менее 1048576 строк, то он делается атомарно. @@ -249,5 +251,6 @@ $ sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data **Смотрите также** - [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size) +- [background_fetches_pool_size](../../../operations/settings/settings.md#background_fetches_pool_size) - [execute_merges_on_single_replica_time_threshold](../../../operations/settings/settings.md#execute-merges-on-single-replica-time-threshold) diff --git a/docs/ru/getting-started/example-datasets/cell-towers.md b/docs/ru/getting-started/example-datasets/cell-towers.md new file mode 100644 index 00000000000..a5524248019 --- /dev/null +++ b/docs/ru/getting-started/example-datasets/cell-towers.md @@ -0,0 +1,128 @@ +--- +toc_priority: 21 +toc_title: Вышки сотовой связи +--- + +# Вышки сотовой связи {#cell-towers} + +Источник этого набора данных (dataset) - самая большая в мире открытая база данных о сотовых вышках - [OpenCellid](https://www.opencellid.org/). К 2021-му году здесь накопилось более, чем 40 миллионов записей о сотовых вышках (GSM, LTE, UMTS, и т.д.) по всему миру с их географическими координатами и метаданными (код страны, сети, и т.д.). + +OpenCelliD Project имеет лицензию Creative Commons Attribution-ShareAlike 4.0 International License, и мы распространяем снэпшот набора данных по условиям этой же лицензии. После авторизации можно загрузить последнюю версию набора данных. + +## Как получить набор данных {#get-the-dataset} + +1. Загрузите снэпшот набора данных за февраль 2021 [отсюда](https://datasets.clickhouse.tech/cell_towers.csv.xz) (729 MB). + +2. 
Если нужно, проверьте полноту и целостность при помощи команды: + +``` +md5sum cell_towers.csv.xz +8cf986f4a0d9f12c6f384a0e9192c908 cell_towers.csv.xz +``` + +3. Распакуйте набор данных при помощи команды: + +``` +xz -d cell_towers.csv.xz +``` + +4. Создайте таблицу: + +``` +CREATE TABLE cell_towers +( + radio Enum8('' = 0, 'CDMA' = 1, 'GSM' = 2, 'LTE' = 3, 'NR' = 4, 'UMTS' = 5), + mcc UInt16, + net UInt16, + area UInt16, + cell UInt64, + unit Int16, + lon Float64, + lat Float64, + range UInt32, + samples UInt32, + changeable UInt8, + created DateTime, + updated DateTime, + averageSignal UInt8 +) +ENGINE = MergeTree ORDER BY (radio, mcc, net, created); +``` + +5. Вставьте данные: +``` +clickhouse-client --query "INSERT INTO cell_towers FORMAT CSVWithNames" < cell_towers.csv +``` + +## Примеры {#examples} + +1. Количество вышек по типам: + +``` +SELECT radio, count() AS c FROM cell_towers GROUP BY radio ORDER BY c DESC + +┌─radio─┬────────c─┐ +│ UMTS │ 20686487 │ +│ LTE │ 12101148 │ +│ GSM │ 9931312 │ +│ CDMA │ 556344 │ +│ NR │ 867 │ +└───────┴──────────┘ + +5 rows in set. Elapsed: 0.011 sec. Processed 43.28 million rows, 43.28 MB (3.83 billion rows/s., 3.83 GB/s.) +``` + +2. Количество вышек по [мобильному коду страны (MCC)](https://ru.wikipedia.org/wiki/Mobile_Country_Code): + +``` +SELECT mcc, count() FROM cell_towers GROUP BY mcc ORDER BY count() DESC LIMIT 10 + +┌─mcc─┬─count()─┐ +│ 310 │ 5024650 │ +│ 262 │ 2622423 │ +│ 250 │ 1953176 │ +│ 208 │ 1891187 │ +│ 724 │ 1836150 │ +│ 404 │ 1729151 │ +│ 234 │ 1618924 │ +│ 510 │ 1353998 │ +│ 440 │ 1343355 │ +│ 311 │ 1332798 │ +└─────┴─────────┘ + +10 rows in set. Elapsed: 0.019 sec. Processed 43.28 million rows, 86.55 MB (2.33 billion rows/s., 4.65 GB/s.) +``` + +Можно увидеть, что по количеству вышек лидируют следующие страны: США, Германия, Россия. + +Вы также можете создать [внешний словарь](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) в ClickHouse для того, чтобы расшифровать эти значения. + +## Пример использования {#use-case} + +Рассмотрим применение функции `pointInPolygon`. + +1. Создаем таблицу, в которой будем хранить многоугольники: + +``` +CREATE TEMPORARY TABLE moscow (polygon Array(Tuple(Float64, Float64))); +``` + +2. 
Очертания Москвы выглядят приблизительно так ("Новая Москва" в них не включена): + +``` +INSERT INTO moscow VALUES ([(37.84172564285271, 55.78000432402266), (37.8381207618713, 55.775874525970494), (37.83979446823122, 55.775626746008065), (37.84243326983639, 55.77446586811748), (37.84262672750849, 55.771974101091104), (37.84153238623039, 55.77114545193181), (37.841124690460184, 55.76722010265554), (37.84239076983644, 55.76654891107098), (37.842283558197025, 55.76258709833121), (37.8421759312134, 55.758073999993734), (37.84198330422974, 55.75381499999371), (37.8416827275085, 55.749277102484484), (37.84157576190186, 55.74794544108413), (37.83897929098507, 55.74525257875241), (37.83739676451868, 55.74404373042019), (37.838732481460525, 55.74298009816793), (37.841183997352545, 55.743060321833575), (37.84097476190185, 55.73938799999373), (37.84048155819702, 55.73570799999372), (37.840095812164286, 55.73228210777237), (37.83983814285274, 55.73080491981639), (37.83846476321406, 55.729799917464675), (37.83835745269769, 55.72919751082619), (37.838636380279524, 55.72859509486539), (37.8395161005249, 55.727705075632784), (37.83897964285276, 55.722727886185154), (37.83862557539366, 55.72034817326636), (37.83559735744853, 55.71944437307499), (37.835370708803126, 55.71831419154461), (37.83738169402022, 55.71765218986692), (37.83823396494291, 55.71691750159089), (37.838056931213345, 55.71547311301385), (37.836812846557606, 55.71221445615604), (37.83522525396725, 55.709331054395555), (37.83269301586908, 55.70953687463627), (37.829667367706236, 55.70903403789297), (37.83311126588435, 55.70552351822608), (37.83058993121339, 55.70041317726053), (37.82983872750851, 55.69883771404813), (37.82934501586913, 55.69718947487017), (37.828926414016685, 55.69504441658371), (37.82876530422971, 55.69287499999378), (37.82894754100031, 55.690759754047335), (37.827697554878185, 55.68951421135665), (37.82447346292115, 55.68965045405069), (37.83136543914793, 55.68322046195302), (37.833554015869154, 55.67814012759211), (37.83544184655761, 55.67295011628339), (37.837480388885474, 55.6672498719639), (37.838960677246064, 55.66316274139358), (37.83926093121332, 55.66046999999383), (37.839025050262435, 55.65869897264431), (37.83670784390257, 55.65794084879904), (37.835656529083245, 55.65694309303843), (37.83704060449217, 55.65689306460552), (37.83696819873806, 55.65550363526252), (37.83760389616388, 55.65487847246661), (37.83687972750851, 55.65356745541324), (37.83515216004943, 55.65155951234079), (37.83312418518067, 55.64979413590619), (37.82801726983639, 55.64640836412121), (37.820614174591, 55.64164525405531), (37.818908190475426, 55.6421883258084), (37.81717543386075, 55.64112490388471), (37.81690987037274, 55.63916106913107), (37.815099354492155, 55.637925371757085), (37.808769150787356, 55.633798276884455), (37.80100123544311, 55.62873670012244), (37.79598013491824, 55.62554336109055), (37.78634567724606, 55.62033499605651), (37.78334147619623, 55.618768681480326), (37.77746201055901, 55.619855533402706), (37.77527329626457, 55.61909966711279), (37.77801986242668, 55.618770300976294), (37.778212973541216, 55.617257701952106), (37.77784818518065, 55.61574504433011), (37.77016867724609, 55.61148576294007), (37.760191219573976, 55.60599579539028), (37.75338926983641, 55.60227892751446), (37.746329965606634, 55.59920577639331), (37.73939925396728, 55.59631430313617), (37.73273665739439, 55.5935318803559), (37.7299954450912, 55.59350760316188), (37.7268679946899, 55.59469840523759), (37.72626726983634, 55.59229549697373), 
(37.7262673598022, 55.59081598950582), (37.71897193121335, 55.5877595845419), (37.70871550793456, 55.58393177431724), (37.700497489410374, 55.580917323756644), (37.69204305026244, 55.57778089778455), (37.68544477378839, 55.57815154690915), (37.68391050793454, 55.57472945079756), (37.678803592590306, 55.57328235936491), (37.6743402539673, 55.57255251445782), (37.66813862698363, 55.57216388774464), (37.617927457672096, 55.57505691895805), (37.60443099999999, 55.5757737568051), (37.599683515869145, 55.57749105910326), (37.59754177842709, 55.57796291823627), (37.59625834786988, 55.57906686095235), (37.59501783265684, 55.57746616444403), (37.593090671936025, 55.57671634534502), (37.587018007904, 55.577944600233785), (37.578692203704804, 55.57982895000019), (37.57327546607398, 55.58116294118248), (37.57385012109279, 55.581550362779), (37.57399562266922, 55.5820107079112), (37.5735356072979, 55.58226289171689), (37.57290393054962, 55.582393529795155), (37.57037722355653, 55.581919415056234), (37.5592298306885, 55.584471614867844), (37.54189249206543, 55.58867650795186), (37.5297256269836, 55.59158133551745), (37.517837865081766, 55.59443656218868), (37.51200186508174, 55.59635625174229), (37.506808949737554, 55.59907823904434), (37.49820432275389, 55.6062944994944), (37.494406071441674, 55.60967103463367), (37.494760001358024, 55.61066689753365), (37.49397137107085, 55.61220931698269), (37.49016528606031, 55.613417718449064), (37.48773249206542, 55.61530616333343), (37.47921386508177, 55.622640129112334), (37.470652153442394, 55.62993723476164), (37.46273446298218, 55.6368075123157), (37.46350692265317, 55.64068225239439), (37.46050283203121, 55.640794546982576), (37.457627470916734, 55.64118904154646), (37.450718034393326, 55.64690488145138), (37.44239252645875, 55.65397824729769), (37.434587576721185, 55.66053543155961), (37.43582144975277, 55.661693766520735), (37.43576786245721, 55.662755031737014), (37.430982915344174, 55.664610641628116), (37.428547447097685, 55.66778515273695), (37.42945134592044, 55.668633314343566), (37.42859571562949, 55.66948145750025), (37.4262836402282, 55.670813882451405), (37.418709037048295, 55.6811141674414), (37.41922139651101, 55.68235377885389), (37.419218771842885, 55.68359335082235), (37.417196501327446, 55.684375235224735), (37.41607020370478, 55.68540557585352), (37.415640857147146, 55.68686637150793), (37.414632153442334, 55.68903015131686), (37.413344899475064, 55.690896881757396), (37.41171432275391, 55.69264232162232), (37.40948282275393, 55.69455101638112), (37.40703674603271, 55.69638690385348), (37.39607169577025, 55.70451821283731), (37.38952706878662, 55.70942491932811), (37.387778313491815, 55.71149057784176), (37.39049275399779, 55.71419814298992), (37.385557272491454, 55.7155489617061), (37.38388335714726, 55.71849856042102), (37.378368238098155, 55.7292763261685), (37.37763597123337, 55.730845879211614), (37.37890062088197, 55.73167906388319), (37.37750451918789, 55.734703664681774), (37.375610832015965, 55.734851959522246), (37.3723813571472, 55.74105626086403), (37.37014935714723, 55.746115620904355), (37.36944173016362, 55.750883999993725), (37.36975304365541, 55.76335905525834), (37.37244070571134, 55.76432079697595), (37.3724259757175, 55.76636979670426), (37.369922155757884, 55.76735417953104), (37.369892695770275, 55.76823419316575), (37.370214730163575, 55.782312184391266), (37.370493611114505, 55.78436801120489), (37.37120164550783, 55.78596427165359), (37.37284851456452, 55.7874378183096), (37.37608325135799, 55.7886695054807), 
(37.3764587460632, 55.78947647305964), (37.37530000265506, 55.79146512926804), (37.38235915344241, 55.79899647809345), (37.384344043655396, 55.80113596939471), (37.38594269577028, 55.80322699999366), (37.38711208598329, 55.804919036911976), (37.3880239841309, 55.806610999993666), (37.38928977249147, 55.81001864976979), (37.39038389947512, 55.81348641242801), (37.39235781481933, 55.81983538336746), (37.393709457672124, 55.82417822811877), (37.394685720901464, 55.82792275755836), (37.39557615344238, 55.830447148154136), (37.39844478226658, 55.83167107969975), (37.40019761214057, 55.83151823557964), (37.400398790382326, 55.83264967594742), (37.39659544313046, 55.83322180909622), (37.39667059524539, 55.83402792148566), (37.39682089947515, 55.83638877400216), (37.39643489154053, 55.83861656112751), (37.3955338994751, 55.84072348043264), (37.392680272491454, 55.84502158126453), (37.39241188227847, 55.84659117913199), (37.392529730163616, 55.84816071336481), (37.39486835714723, 55.85288092980303), (37.39873052645878, 55.859893456073635), (37.40272161111449, 55.86441833633205), (37.40697072750854, 55.867579567544375), (37.410007082016016, 55.868369880337), (37.4120992989502, 55.86920843741314), (37.412668021163924, 55.87055369615854), (37.41482461111453, 55.87170587948249), (37.41862266137694, 55.873183961039565), (37.42413732540892, 55.874879126654704), (37.4312182698669, 55.875614937236705), (37.43111093783558, 55.8762723478417), (37.43332105622856, 55.87706546369396), (37.43385747619623, 55.87790681284802), (37.441303050262405, 55.88027084462084), (37.44747234260555, 55.87942070143253), (37.44716141796871, 55.88072960917233), (37.44769797085568, 55.88121221323979), (37.45204320500181, 55.882080694420715), (37.45673176190186, 55.882346110794586), (37.463383999999984, 55.88252729504517), (37.46682797486874, 55.88294937719063), (37.470014457672086, 55.88361266759345), (37.47751410450743, 55.88546991372396), (37.47860317658232, 55.88534929207307), (37.48165826025772, 55.882563306475106), (37.48316434442331, 55.8815803226785), (37.483831555817645, 55.882427612793315), (37.483182967125686, 55.88372791409729), (37.483092277908824, 55.88495581062434), (37.4855716508179, 55.8875561994203), (37.486440636245746, 55.887827444039566), (37.49014203439328, 55.88897899871799), (37.493210285705544, 55.890208937135604), (37.497512451065035, 55.891342397444696), (37.49780744510645, 55.89174030252967), (37.49940333499519, 55.89239745507079), (37.50018383334346, 55.89339220941865), (37.52421672750851, 55.903869074155224), (37.52977457672118, 55.90564076517974), (37.53503220370484, 55.90661661218259), (37.54042858064267, 55.90714113744566), (37.54320461007303, 55.905645048442985), (37.545686966066306, 55.906608607018505), (37.54743976120755, 55.90788552162358), (37.55796999999999, 55.90901557907218), (37.572711542327866, 55.91059395704873), (37.57942799999998, 55.91073854155573), (37.58502865872187, 55.91009969268444), (37.58739968913264, 55.90794809960554), (37.59131567193598, 55.908713267595054), (37.612687423278814, 55.902866854295375), (37.62348079629517, 55.90041967242986), (37.635797880950896, 55.898141151686396), (37.649487626983664, 55.89639275532968), (37.65619302513125, 55.89572360207488), (37.66294133862307, 55.895295577183965), (37.66874564418033, 55.89505457604897), (37.67375601586915, 55.89254677027454), (37.67744661901856, 55.8947775867987), (37.688347, 55.89450045676125), (37.69480554232789, 55.89422926332761), (37.70107096560668, 55.89322256101114), (37.705962965606716, 55.891763491662616), 
(37.711885134918205, 55.889110234998974), (37.71682005026245, 55.886577568759876), (37.7199315476074, 55.88458159806678), (37.72234560316464, 55.882281005794134), (37.72364385977171, 55.8809452036196), (37.725371142837474, 55.8809722706006), (37.727870902099546, 55.88037213862385), (37.73394330422971, 55.877941504088696), (37.745339592590376, 55.87208120378722), (37.75525267724611, 55.86703807949492), (37.76919976190188, 55.859821640197474), (37.827835219574, 55.82962968399116), (37.83341438888553, 55.82575289922351), (37.83652584655761, 55.82188784027888), (37.83809213491821, 55.81612575504693), (37.83605359521481, 55.81460347077685), (37.83632178569025, 55.81276696067908), (37.838623105812026, 55.811486181656385), (37.83912198147584, 55.807329380532785), (37.839079078033414, 55.80510270463816), (37.83965844708251, 55.79940712529036), (37.840581150787344, 55.79131399999368), (37.84172564285271, 55.78000432402266)]); +``` + +3. Проверяем, сколько сотовых вышек находится в Москве: + +``` +SELECT count() FROM cell_towers WHERE pointInPolygon((lon, lat), (SELECT * FROM moscow)) + +┌─count()─┐ +│ 310463 │ +└─────────┘ + +1 rows in set. Elapsed: 0.067 sec. Processed 43.28 million rows, 692.42 MB (645.83 million rows/s., 10.33 GB/s.) +``` + +Вы можете протестировать другие запросы с помощью интерактивного ресурса [Playground](https://gh-api.clickhouse.tech/play?user=play). Например, [вот так](https://gh-api.clickhouse.tech/play?user=play#U0VMRUNUIG1jYywgY291bnQoKSBGUk9NIGNlbGxfdG93ZXJzIEdST1VQIEJZIG1jYyBPUkRFUiBCWSBjb3VudCgpIERFU0M=). Однако, обратите внимание, что здесь нельзя создавать временные таблицы. diff --git a/docs/ru/getting-started/example-datasets/index.md b/docs/ru/getting-started/example-datasets/index.md index f590300adda..756b3a75dee 100644 --- a/docs/ru/getting-started/example-datasets/index.md +++ b/docs/ru/getting-started/example-datasets/index.md @@ -16,4 +16,5 @@ toc_title: "Введение" - [AMPLab Big Data Benchmark](amplab-benchmark.md) - [Данные о такси в Нью-Йорке](nyc-taxi.md) - [OnTime](ontime.md) +- [Вышки сотовой связи](../../getting-started/example-datasets/cell-towers.md) diff --git a/docs/ru/getting-started/example-datasets/ontime.md b/docs/ru/getting-started/example-datasets/ontime.md index be5b1cd1b70..d46b7e75e7f 100644 --- a/docs/ru/getting-started/example-datasets/ontime.md +++ b/docs/ru/getting-started/example-datasets/ontime.md @@ -27,126 +27,127 @@ done Создание таблицы: ``` sql -CREATE TABLE `ontime` ( - `Year` UInt16, - `Quarter` UInt8, - `Month` UInt8, - `DayofMonth` UInt8, - `DayOfWeek` UInt8, - `FlightDate` Date, - `UniqueCarrier` FixedString(7), - `AirlineID` Int32, - `Carrier` FixedString(2), - `TailNum` String, - `FlightNum` String, - `OriginAirportID` Int32, - `OriginAirportSeqID` Int32, - `OriginCityMarketID` Int32, - `Origin` FixedString(5), - `OriginCityName` String, - `OriginState` FixedString(2), - `OriginStateFips` String, - `OriginStateName` String, - `OriginWac` Int32, - `DestAirportID` Int32, - `DestAirportSeqID` Int32, - `DestCityMarketID` Int32, - `Dest` FixedString(5), - `DestCityName` String, - `DestState` FixedString(2), - `DestStateFips` String, - `DestStateName` String, - `DestWac` Int32, - `CRSDepTime` Int32, - `DepTime` Int32, - `DepDelay` Int32, - `DepDelayMinutes` Int32, - `DepDel15` Int32, - `DepartureDelayGroups` String, - `DepTimeBlk` String, - `TaxiOut` Int32, - `WheelsOff` Int32, - `WheelsOn` Int32, - `TaxiIn` Int32, - `CRSArrTime` Int32, - `ArrTime` Int32, - `ArrDelay` Int32, - `ArrDelayMinutes` Int32, - `ArrDel15` Int32, - 
`ArrivalDelayGroups` Int32, - `ArrTimeBlk` String, - `Cancelled` UInt8, - `CancellationCode` FixedString(1), - `Diverted` UInt8, - `CRSElapsedTime` Int32, - `ActualElapsedTime` Int32, - `AirTime` Int32, - `Flights` Int32, - `Distance` Int32, - `DistanceGroup` UInt8, - `CarrierDelay` Int32, - `WeatherDelay` Int32, - `NASDelay` Int32, - `SecurityDelay` Int32, - `LateAircraftDelay` Int32, - `FirstDepTime` String, - `TotalAddGTime` String, - `LongestAddGTime` String, - `DivAirportLandings` String, - `DivReachedDest` String, - `DivActualElapsedTime` String, - `DivArrDelay` String, - `DivDistance` String, - `Div1Airport` String, - `Div1AirportID` Int32, - `Div1AirportSeqID` Int32, - `Div1WheelsOn` String, - `Div1TotalGTime` String, - `Div1LongestGTime` String, - `Div1WheelsOff` String, - `Div1TailNum` String, - `Div2Airport` String, - `Div2AirportID` Int32, - `Div2AirportSeqID` Int32, - `Div2WheelsOn` String, - `Div2TotalGTime` String, - `Div2LongestGTime` String, - `Div2WheelsOff` String, - `Div2TailNum` String, - `Div3Airport` String, - `Div3AirportID` Int32, - `Div3AirportSeqID` Int32, - `Div3WheelsOn` String, - `Div3TotalGTime` String, - `Div3LongestGTime` String, - `Div3WheelsOff` String, - `Div3TailNum` String, - `Div4Airport` String, - `Div4AirportID` Int32, - `Div4AirportSeqID` Int32, - `Div4WheelsOn` String, - `Div4TotalGTime` String, - `Div4LongestGTime` String, - `Div4WheelsOff` String, - `Div4TailNum` String, - `Div5Airport` String, - `Div5AirportID` Int32, - `Div5AirportSeqID` Int32, - `Div5WheelsOn` String, - `Div5TotalGTime` String, - `Div5LongestGTime` String, - `Div5WheelsOff` String, - `Div5TailNum` String +CREATE TABLE `ontime` +( + `Year` UInt16, + `Quarter` UInt8, + `Month` UInt8, + `DayofMonth` UInt8, + `DayOfWeek` UInt8, + `FlightDate` Date, + `Reporting_Airline` String, + `DOT_ID_Reporting_Airline` Int32, + `IATA_CODE_Reporting_Airline` String, + `Tail_Number` Int32, + `Flight_Number_Reporting_Airline` String, + `OriginAirportID` Int32, + `OriginAirportSeqID` Int32, + `OriginCityMarketID` Int32, + `Origin` FixedString(5), + `OriginCityName` String, + `OriginState` FixedString(2), + `OriginStateFips` String, + `OriginStateName` String, + `OriginWac` Int32, + `DestAirportID` Int32, + `DestAirportSeqID` Int32, + `DestCityMarketID` Int32, + `Dest` FixedString(5), + `DestCityName` String, + `DestState` FixedString(2), + `DestStateFips` String, + `DestStateName` String, + `DestWac` Int32, + `CRSDepTime` Int32, + `DepTime` Int32, + `DepDelay` Int32, + `DepDelayMinutes` Int32, + `DepDel15` Int32, + `DepartureDelayGroups` String, + `DepTimeBlk` String, + `TaxiOut` Int32, + `WheelsOff` Int32, + `WheelsOn` Int32, + `TaxiIn` Int32, + `CRSArrTime` Int32, + `ArrTime` Int32, + `ArrDelay` Int32, + `ArrDelayMinutes` Int32, + `ArrDel15` Int32, + `ArrivalDelayGroups` Int32, + `ArrTimeBlk` String, + `Cancelled` UInt8, + `CancellationCode` FixedString(1), + `Diverted` UInt8, + `CRSElapsedTime` Int32, + `ActualElapsedTime` Int32, + `AirTime` Nullable(Int32), + `Flights` Int32, + `Distance` Int32, + `DistanceGroup` UInt8, + `CarrierDelay` Int32, + `WeatherDelay` Int32, + `NASDelay` Int32, + `SecurityDelay` Int32, + `LateAircraftDelay` Int32, + `FirstDepTime` String, + `TotalAddGTime` String, + `LongestAddGTime` String, + `DivAirportLandings` String, + `DivReachedDest` String, + `DivActualElapsedTime` String, + `DivArrDelay` String, + `DivDistance` String, + `Div1Airport` String, + `Div1AirportID` Int32, + `Div1AirportSeqID` Int32, + `Div1WheelsOn` String, + `Div1TotalGTime` String, + 
`Div1LongestGTime` String, + `Div1WheelsOff` String, + `Div1TailNum` String, + `Div2Airport` String, + `Div2AirportID` Int32, + `Div2AirportSeqID` Int32, + `Div2WheelsOn` String, + `Div2TotalGTime` String, + `Div2LongestGTime` String, + `Div2WheelsOff` String, + `Div2TailNum` String, + `Div3Airport` String, + `Div3AirportID` Int32, + `Div3AirportSeqID` Int32, + `Div3WheelsOn` String, + `Div3TotalGTime` String, + `Div3LongestGTime` String, + `Div3WheelsOff` String, + `Div3TailNum` String, + `Div4Airport` String, + `Div4AirportID` Int32, + `Div4AirportSeqID` Int32, + `Div4WheelsOn` String, + `Div4TotalGTime` String, + `Div4LongestGTime` String, + `Div4WheelsOff` String, + `Div4TailNum` String, + `Div5Airport` String, + `Div5AirportID` Int32, + `Div5AirportSeqID` Int32, + `Div5WheelsOn` String, + `Div5TotalGTime` String, + `Div5LongestGTime` String, + `Div5WheelsOff` String, + `Div5TailNum` String ) ENGINE = MergeTree -PARTITION BY Year -ORDER BY (Carrier, FlightDate) -SETTINGS index_granularity = 8192; + PARTITION BY Year + ORDER BY (IATA_CODE_Reporting_Airline, FlightDate) + SETTINGS index_granularity = 8192; ``` Загрузка данных: ``` bash -$ for i in *.zip; do echo $i; unzip -cq $i '*.csv' | sed 's/\.00//g' | clickhouse-client --host=example-perftest01j --query="INSERT INTO ontime FORMAT CSVWithNames"; done +ls -1 *.zip | xargs -I{} -P $(nproc) bash -c "echo {}; unzip -cq {} '*.csv' | sed 's/\.00//g' | clickhouse-client --input_format_with_names_use_header=0 --query='INSERT INTO ontime FORMAT CSVWithNames'" ``` ## Скачивание готовых партиций {#skachivanie-gotovykh-partitsii} @@ -211,7 +212,7 @@ LIMIT 10; Q4. Количество задержек по перевозчикам за 2007 год ``` sql -SELECT Carrier, count(*) +SELECT IATA_CODE_Reporting_Airline AS Carrier, count(*) FROM ontime WHERE DepDelay>10 AND Year=2007 GROUP BY Carrier @@ -225,29 +226,29 @@ SELECT Carrier, c, c2, c*100/c2 as c3 FROM ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year=2007 GROUP BY Carrier -) +) q JOIN ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c2 FROM ontime WHERE Year=2007 GROUP BY Carrier -) USING Carrier +) qq USING Carrier ORDER BY c3 DESC; ``` Более оптимальная версия того же запроса: ``` sql -SELECT Carrier, avg(DepDelay>10)*100 AS c3 +SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3 FROM ontime WHERE Year=2007 GROUP BY Carrier @@ -261,29 +262,29 @@ SELECT Carrier, c, c2, c*100/c2 as c3 FROM ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year>=2000 AND Year<=2008 GROUP BY Carrier -) +) q JOIN ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c2 FROM ontime WHERE Year>=2000 AND Year<=2008 GROUP BY Carrier -) USING Carrier +) qq USING Carrier ORDER BY c3 DESC; ``` Более оптимальная версия того же запроса: ``` sql -SELECT Carrier, avg(DepDelay>10)*100 AS c3 +SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3 FROM ontime WHERE Year>=2000 AND Year<=2008 GROUP BY Carrier @@ -302,7 +303,7 @@ FROM from ontime WHERE DepDelay>10 GROUP BY Year -) +) q JOIN ( select @@ -310,7 +311,7 @@ JOIN count(*) as c2 from ontime GROUP BY Year -) USING (Year) +) qq USING (Year) ORDER BY Year; ``` @@ -346,7 +347,7 @@ Q10. 
``` sql SELECT - min(Year), max(Year), Carrier, count(*) AS cnt, + min(Year), max(Year), IATA_CODE_Reporting_Airline AS Carrier, count(*) AS cnt, sum(ArrDelayMinutes>30) AS flights_delayed, round(sum(ArrDelayMinutes>30)/count(*),2) AS rate FROM ontime diff --git a/docs/ru/getting-started/install.md b/docs/ru/getting-started/install.md index 4ae27a910ea..d0a54d9043a 100644 --- a/docs/ru/getting-started/install.md +++ b/docs/ru/getting-started/install.md @@ -95,7 +95,9 @@ sudo clickhouse-client-$LATEST_VERSION/install/doinst.sh - [AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse` - [FreeBSD](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse` -После скачивания, можно воспользоваться `clickhouse client` для подключения к серверу, или `clickhouse local` для обработки локальных данных. Для запуска `clickhouse server` необходимо скачать конфигурационные файлы [сервера](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config.xml) и [пользователей](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/users.xml) с GitHub. +После скачивания можно воспользоваться `clickhouse client` для подключения к серверу или `clickhouse local` для обработки локальных данных. + +Чтобы установить ClickHouse в рамках всей системы (с необходимыми конфигурационными файлами, настройками пользователей и т.д.), выполните `sudo ./clickhouse install`. Затем выполните команды `clickhouse start` (чтобы запустить сервер) и `clickhouse-client` (чтобы подключиться к нему). Данные сборки не рекомендуются для использования в продакшене, так как они недостаточно тщательно протестированны. Также, в них присутствуют не все возможности ClickHouse. @@ -172,4 +174,3 @@ SELECT 1 **Поздравляем, система работает!** Для дальнейших экспериментов можно попробовать загрузить один из тестовых наборов данных или пройти [пошаговое руководство для начинающих](https://clickhouse.tech/tutorial.html). - diff --git a/docs/ru/guides/apply-catboost-model.md b/docs/ru/guides/apply-catboost-model.md index 11964c57fc7..db2be63692f 100644 --- a/docs/ru/guides/apply-catboost-model.md +++ b/docs/ru/guides/apply-catboost-model.md @@ -158,7 +158,9 @@ FROM amazon_train /home/catboost/data/libcatboostmodel.so /home/catboost/models/*_model.xml ``` - +!!! note "Примечание" + Вы можете позднее изменить путь к конфигурации модели CatBoost без перезагрузки сервера. + ## 4. Запустите вывод модели из SQL {#run-model-inference} Для тестирования модели запустите клиент ClickHouse `$ clickhouse client`. diff --git a/docs/ru/interfaces/cli.md b/docs/ru/interfaces/cli.md index 96ec36be79f..277b73a6d36 100644 --- a/docs/ru/interfaces/cli.md +++ b/docs/ru/interfaces/cli.md @@ -121,6 +121,7 @@ $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="numbe - `--user, -u` — имя пользователя, по умолчанию — ‘default’. - `--password` — пароль, по умолчанию — пустая строка. - `--query, -q` — запрос для выполнения, при использовании в неинтерактивном режиме. +- `--queries-file, -qf` - путь к файлу с запросами для выполнения. Необходимо указать только одну из опций: `query` или `queries-file`. - `--database, -d` — выбрать текущую БД. Без указания значение берется из настроек сервера (по умолчанию — БД ‘default’). 
- `--multiline, -m` — если указано — разрешить многострочные запросы, не отправлять запрос по нажатию Enter. - `--multiquery, -n` — если указано — разрешить выполнять несколько запросов, разделённых точкой с запятой. @@ -130,6 +131,7 @@ $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="numbe - `--stacktrace` — если указано, в случае исключения, выводить также его стек-трейс. - `--config-file` — имя конфигурационного файла. - `--secure` — если указано, будет использован безопасный канал. +- `--history_file` - путь к файлу с историей команд. - `--param_` — значение параметра для [запроса с параметрами](#cli-queries-with-parameters). Начиная с версии 20.5, в `clickhouse-client` есть автоматическая подсветка синтаксиса (включена всегда). diff --git a/docs/ru/interfaces/third-party/gui.md b/docs/ru/interfaces/third-party/gui.md index f913a0ff2cc..dc96c32e996 100644 --- a/docs/ru/interfaces/third-party/gui.md +++ b/docs/ru/interfaces/third-party/gui.md @@ -166,4 +166,25 @@ toc_title: "Визуальные интерфейсы от сторонних р [Как сконфигурировать ClickHouse в Looker.](https://docs.looker.com/setup-and-management/database-config/clickhouse) -[Original article](https://clickhouse.tech/docs/ru/interfaces/third-party/gui/) +### SeekTable {#seektable} + +[SeekTable](https://www.seektable.com) — это аналитический инструмент для самостоятельного анализа и обработки данных бизнес-аналитики. Он доступен как в виде облачного сервиса, так и в виде локальной версии. Отчеты из SeekTable могут быть встроены в любое веб-приложение. + +Основные возможности: + +- Удобный конструктор отчетов. +- Гибкая настройка отчетов SQL и создание запросов для специфичных отчетов. +- Интегрируется с ClickHouse, используя собственную точку приема запроса TCP/IP или интерфейс HTTP(S) (два разных драйвера). +- Поддерживает всю мощь диалекта ClickHouse SQL для построения запросов по различным измерениям и показателям. +- [WEB-API](https://www.seektable.com/help/web-api-integration) для автоматизированной генерации отчетов. +- Процесс разработки отчетов поддерживает [резервное копирование/восстановление данных](https://www.seektable.com/help/self-hosted-backup-restore); конфигурация моделей данных (кубов) / отчетов представляет собой удобочитаемый XML-файл, который может храниться в системе контроля версий. + +SeekTable [бесплатен](https://www.seektable.com/help/cloud-pricing) для личного/индивидуального использования. + +[Как сконфигурировать подключение ClickHouse в SeekTable.](https://www.seektable.com/help/clickhouse-pivot-table) + +### Chadmin {#chadmin} + +[Chadmin](https://github.com/bun4uk/chadmin) — простой графический интерфейс для визуализации запущенных запросов на вашем кластере ClickHouse. Он отображает информацию о запросах и дает возможность их завершать. + +[Original article](https://clickhouse.tech/docs/en/interfaces/third-party/gui/) diff --git a/docs/ru/operations/configuration-files.md b/docs/ru/operations/configuration-files.md index 11a01d1e6d2..8b4b0da8f2b 100644 --- a/docs/ru/operations/configuration-files.md +++ b/docs/ru/operations/configuration-files.md @@ -6,9 +6,9 @@ toc_title: "Конфигурационные файлы" # Конфигурационные файлы {#configuration_files} -Основной конфигурационный файл сервера - `config.xml`. Он расположен в директории `/etc/clickhouse-server/`. +Основной конфигурационный файл сервера - `config.xml` или `config.yaml`. Он расположен в директории `/etc/clickhouse-server/`. 
-Отдельные настройки могут быть переопределены в файлах `*.xml` и `*.conf` из директории `config.d` рядом с конфигом.
+Отдельные настройки могут быть переопределены в файлах `*.xml` и `*.conf`, а также `.yaml` (для файлов в формате YAML) из директории `config.d` рядом с конфигом.

У элементов этих конфигурационных файлов могут быть указаны атрибуты `replace` или `remove`.

@@ -25,7 +25,7 @@ toc_title: "Конфигурационные файлы"

В элементе `users_config` файла `config.xml` можно указать относительный путь к конфигурационному файлу с настройками пользователей, профилей и квот. Значение `users_config` по умолчанию — `users.xml`. Если `users_config` не указан, то настройки пользователей, профилей и квот можно задать непосредственно в `config.xml`. Настройки пользователя могут быть разделены в несколько отдельных файлов аналогичных `config.xml` и `config.d\`. Имя директории задаётся также как `users_config`.
-Имя директории задаётся так же, как имя файла в `users_config`, с подстановкой `.d` вместо `.xml`.
+Имя директории задаётся так же, как имя файла в `users_config`, с подстановкой `.d` вместо `.xml`/`.yaml`.
Директория `users.d` используется по умолчанию, также как `users.xml` используется для `users_config`.
Например, можно иметь по отдельному конфигурационному файлу для каждого пользователя:

@@ -52,3 +52,66 @@ $ cat /etc/clickhouse-server/users.d/alice.xml

Сервер следит за изменениями конфигурационных файлов, а также файлов и ZooKeeper-узлов, которые были использованы при выполнении подстановок и переопределений, и перезагружает настройки пользователей и кластеров на лету. То есть, можно изменять кластера, пользователей и их настройки без перезапуска сервера.

+## Примеры записи конфигурации на YAML {#example}
+
+Здесь можно рассмотреть пример реальной конфигурации, записанной на YAML: [config.yaml.example](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config.yaml.example).
+
+Между стандартами XML и YAML имеются различия, поэтому в этом разделе перечислены некоторые подсказки для написания конфигурации на YAML.
+
+Для записи обычной пары ключ-значение следует использовать Scalar:
+``` yaml
+key: value
+```
+
+Для создания тега, содержащего подтеги, следует использовать Map:
+``` yaml
+map_key:
+  key1: val1
+  key2: val2
+  key3: val3
+```
+
+Для создания списка значений или подтегов, расположенных по определенному ключу, следует использовать Sequence:
+``` yaml
+seq_key:
+  - val1
+  - val2
+  - key1: val3
+  - map:
+      key2: val4
+      key3: val5
+```
+
+Если необходимо объявить тег, аналогичный XML-атрибуту, задайте скаляр, имеющий ключ с префиксом @ и заключенный в кавычки:
+
+``` yaml
+map:
+  "@attr1": value1
+  "@attr2": value2
+  key: 123
+```
+
+Из такой Map мы получим после конвертации:
+
+``` xml
+<map attr1="value1" attr2="value2">
+    <key>123</key>
+</map>
+```
+
+Помимо Map, можно задавать атрибуты для Sequence:
+
+``` yaml
+seq:
+  - "@attr1": value1
+  - "@attr2": value2
+  - 123
+  - abc
+```
+
+Таким образом мы получаем аналог следующей записи на XML:
+
+``` xml
+<seq attr1="value1" attr2="value2">123</seq>
+<seq attr1="value1" attr2="value2">abc</seq>
+```
diff --git a/docs/ru/operations/server-configuration-parameters/settings.md b/docs/ru/operations/server-configuration-parameters/settings.md
index b50347f6196..abaf2a8f2da 100644
--- a/docs/ru/operations/server-configuration-parameters/settings.md
+++ b/docs/ru/operations/server-configuration-parameters/settings.md
@@ -101,6 +101,12 @@ ClickHouse проверяет условия для `min_part_size` и `min_part
```

+## database_atomic_delay_before_drop_table_sec {#database_atomic_delay_before_drop_table_sec}
+
+Устанавливает задержку перед удалением табличных данных, в секундах. Если запрос имеет модификатор `SYNC`, эта настройка игнорируется.
+
+Значение по умолчанию: `480` (8 минут).
+
## default\_database {#default-database}

База данных по умолчанию.

@@ -285,7 +291,7 @@ ClickHouse проверяет условия для `min_part_size` и `min_part

## interserver_http_host {#interserver-http-host}

-Имя хоста, которое могут использовать другие серверы для обращения к этому.
+Имя хоста, которое могут использовать другие серверы для обращения к этому хосту.

Если не указано, то определяется аналогично команде `hostname -f`.

@@ -297,11 +303,36 @@ ClickHouse проверяет условия для `min_part_size` и `min_part

``` xml
<interserver_http_host>example.yandex.ru</interserver_http_host>
```

+## interserver_https_port {#interserver-https-port}
+
+Порт для обмена данными между репликами ClickHouse по протоколу `HTTPS`.
+
+**Пример**
+
+``` xml
+<interserver_https_port>9010</interserver_https_port>
+```
+
+## interserver_https_host {#interserver-https-host}
+
+Имя хоста, которое могут использовать другие реплики для обращения к нему по протоколу `HTTPS`.
+
+**Пример**
+
+``` xml
+<interserver_https_host>example.yandex.ru</interserver_https_host>
+```
+
+
## interserver_http_credentials {#server-settings-interserver-http-credentials}

Имя пользователя и пароль, использующиеся для аутентификации при [репликации](../../operations/server-configuration-parameters/settings.md) движками Replicated\*. Это имя пользователя и пароль используются только для взаимодействия между репликами кластера и никак не связаны с аутентификацией клиентов ClickHouse. Сервер проверяет совпадение имени и пароля для соединяющихся с ним реплик, а также использует это же имя и пароль для соединения с другими репликами. Соответственно, эти имя и пароль должны быть прописаны одинаковыми для всех реплик кластера. По умолчанию аутентификация не используется.

+!!! note "Примечание"
+    Эти учетные данные являются общими для обмена данными по протоколам `HTTP` и `HTTPS`.
+
Раздел содержит следующие параметры:

- `user` — имя пользователя.
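Возвращаясь к описанной выше настройке `database_atomic_delay_before_drop_table_sec`: небольшой набросок, показывающий разницу между отложенным и немедленным удалением (имена базы и таблиц условные):

``` sql
-- В базе данных с движком Atomic данные таблицы после DROP
-- физически стираются с задержкой (по умолчанию 480 секунд)
DROP TABLE IF EXISTS db.demo_table;

-- С модификатором SYNC данные удаляются немедленно,
-- настройка задержки игнорируется
DROP TABLE IF EXISTS db.demo_table_sync SYNC;
```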
@@ -384,7 +415,7 @@ ClickHouse проверяет условия для `min_part_size` и `min_part Значения по умолчанию: при указанном `address` - `LOG_USER`, иначе - `LOG_DAEMON` - format - формат сообщений. Возможные значения - `bsd` и `syslog` -## send_crash_reports {#server_configuration_parameters-logger} +## send_crash_reports {#server_configuration_parameters-send_crash_reports} Настройки для отправки сообщений о сбоях в команду разработчиков ядра ClickHouse через [Sentry](https://sentry.io). Включение этих настроек, особенно в pre-production среде, может дать очень ценную информацию и поможет развитию ClickHouse. @@ -481,7 +512,15 @@ ClickHouse проверяет условия для `min_part_size` и `min_part ## max_concurrent_queries {#max-concurrent-queries} -Максимальное количество одновременно обрабатываемых запросов. +Определяет максимальное количество одновременно обрабатываемых запросов, связанных с таблицей семейства `MergeTree`. Запросы также могут быть ограничены настройками: [max_concurrent_queries_for_all_users](#max-concurrent-queries-for-all-users), [min_marks_to_honor_max_concurrent_queries](#min-marks-to-honor-max-concurrent-queries). + +!!! info "Примечание" + Параметры этих настроек могут быть изменены во время выполнения запросов и вступят в силу немедленно. Запросы, которые уже запущены, выполнятся без изменений. + +Возможные значения: + +- Положительное целое число. +- 0 — выключена. **Пример** @@ -509,6 +548,21 @@ ClickHouse проверяет условия для `min_part_size` и `min_part - [max_concurrent_queries](#max-concurrent-queries) +## min_marks_to_honor_max_concurrent_queries {#min-marks-to-honor-max-concurrent-queries} + +Определяет минимальное количество засечек, считываемых запросом для применения настройки [max_concurrent_queries](#max-concurrent-queries). + +Возможные значения: + +- Положительное целое число. +- 0 — выключена. + +**Пример** + +``` xml +10 +``` + ## max_connections {#max-connections} Максимальное количество входящих соединений. @@ -1159,4 +1213,3 @@ ClickHouse использует ZooKeeper для хранения метадан ``` - diff --git a/docs/ru/operations/settings/merge-tree-settings.md b/docs/ru/operations/settings/merge-tree-settings.md index bfc0b0a2644..2af99bb8026 100644 --- a/docs/ru/operations/settings/merge-tree-settings.md +++ b/docs/ru/operations/settings/merge-tree-settings.md @@ -55,6 +55,26 @@ Eсли число кусков в партиции превышает знач ClickHouse искусственно выполняет `INSERT` дольше (добавляет ‘sleep’), чтобы фоновый механизм слияния успевал слиять куски быстрее, чем они добавляются. +## inactive_parts_to_throw_insert {#inactive-parts-to-throw-insert} + +Если число неактивных кусков в партиции превышает значение `inactive_parts_to_throw_insert`, `INSERT` прерывается с исключением «Too many inactive parts (N). Parts cleaning are processing significantly slower than inserts». + +Возможные значения: + +- Положительное целое число. + +Значение по умолчанию: 0 (не ограничено). + +## inactive_parts_to_delay_insert {#inactive-parts-to-delay-insert} + +Если число неактивных кусков в партиции больше или равно значению `inactive_parts_to_delay_insert`, `INSERT` искусственно замедляется. Это полезно, когда сервер не может быстро очистить неактивные куски. + +Возможные значения: + +- Положительное целое число. + +Значение по умолчанию: 0 (не ограничено). + ## max_delay_to_insert {#max-delay-to-insert} Величина в секундах, которая используется для расчета задержки `INSERT`, если число кусков в партиции превышает значение [parts_to_delay_insert](#parts-to-delay-insert). 
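Описанные выше настройки `inactive_parts_to_throw_insert` и `inactive_parts_to_delay_insert` задаются на уровне таблицы. Небольшой набросок; имена и значения подобраны для иллюстрации:

``` sql
CREATE TABLE parts_demo (d Date, x UInt64)
ENGINE = MergeTree
ORDER BY x
SETTINGS inactive_parts_to_delay_insert = 100,
         inactive_parts_to_throw_insert = 250;

-- Для существующей таблицы значение можно изменить без пересоздания:
ALTER TABLE parts_demo MODIFY SETTING inactive_parts_to_delay_insert = 150;
```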
@@ -129,6 +149,39 @@ Eсли суммарное число активных кусков во все Стандартное значение Linux dirty_expire_centisecs - 30 секунд (максимальное время, которое записанные данные хранятся только в оперативной памяти), но при больших нагрузках на дисковую систему, данные могут быть записаны намного позже. Экспериментально было найдено время - 480 секунд, за которое гарантированно новый кусок будет записан на диск. +## replicated_fetches_http_connection_timeout {#replicated_fetches_http_connection_timeout} + +Тайм-аут HTTP-соединения (в секундах) для запросов на скачивание кусков. Наследуется из профиля по умолчанию [http_connection_timeout](./settings.md#http_connection_timeout), если не задан явно. + +Возможные значения: + +- 0 - используется значение `http_connection_timeout`. +- Любое положительное целое число. + +Значение по умолчанию: `0`. + +## replicated_fetches_http_send_timeout {#replicated_fetches_http_send_timeout} + +Тайм-аут (в секундах) для отправки HTTP-запросов на скачивание кусков. Наследуется из профиля по умолчанию [http_send_timeout](./settings.md#http_send_timeout), если не задан явно. + +Возможные значения: + +- 0 - используется значение `http_send_timeout`. +- Любое положительное целое число. + +Значение по умолчанию: `0`. + +## replicated_fetches_http_receive_timeout {#replicated_fetches_http_receive_timeout} + +Тайм-аут (в секундах) для получения HTTP-запросов на скачивание кусков. Наследуется из профиля по умолчанию [http_receive_timeout](./settings.md#http_receive_timeout), если не задан явно. + +Возможные значения: + +- 0 - используется значение `http_receive_timeout`. +- Любое положительное целое число. + +Значение по умолчанию: `0`. + ## max_bytes_to_merge_at_max_space_in_pool {#max-bytes-to-merge-at-max-space-in-pool} Максимальный суммарный размер кусков (в байтах) в одном слиянии, при наличии свободных ресурсов в фоновом пуле. @@ -193,4 +246,15 @@ Eсли суммарное число активных кусков во все Значение по умолчанию: -1 (неограниченно). +## allow_floating_point_partition_key {#allow_floating_point_partition_key} + +Позволяет использовать число с плавающей запятой в качестве ключа партиционирования. + +Возможные значения: + +- 0 — Ключ партиционирования с плавающей запятой не разрешен. +- 1 — Ключ партиционирования с плавающей запятой разрешен. + +Значение по умолчанию: `0`. + [Original article](https://clickhouse.tech/docs/ru/operations/settings/merge_tree_settings/) diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 9d3ea4a809a..ada8ee91293 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -119,6 +119,16 @@ ClickHouse применяет настройку в тех случаях, ко Значение по умолчанию: 0. +## http_max_uri_size {#http-max-uri-size} + +Устанавливает максимальную длину URI в HTTP-запросе. + +Возможные значения: + +- Положительное целое. + +Значение по умолчанию: 1048576. + ## send_progress_in_http_headers {#settings-send_progress_in_http_headers} Включает или отключает HTTP-заголовки `X-ClickHouse-Progress` в ответах `clickhouse-server`. @@ -337,7 +347,31 @@ INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 102 2; ## input_format_null_as_default {#settings-input-format-null-as-default} -Включает или отключает использование значений по умолчанию в случаях, когда во входных данных содержится `NULL`, но тип соответствующего столбца не `Nullable(T)` (для текстовых форматов). 
+Включает или отключает инициализацию [значениями по умолчанию](../../sql-reference/statements/create/table.md#create-default-values) ячеек с [NULL](../../sql-reference/syntax.md#null-literal), если тип данных столбца не позволяет [хранить NULL](../../sql-reference/data-types/nullable.md#data_type-nullable). +Если столбец не позволяет хранить `NULL` и эта настройка отключена, то вставка `NULL` приведет к возникновению исключения. Если столбец позволяет хранить `NULL`, то значения `NULL` вставляются независимо от этой настройки. + +Эта настройка используется для запросов [INSERT ... VALUES](../../sql-reference/statements/insert-into.md) для текстовых входных форматов. + +Возможные значения: + +- 0 — вставка `NULL` в столбец, не позволяющий хранить `NULL`, приведет к возникновению исключения. +- 1 — ячейки с `NULL` инициализируются значением столбца по умолчанию. + +Значение по умолчанию: `1`. + +## insert_null_as_default {#insert_null_as_default} + +Включает или отключает вставку [значений по умолчанию](../../sql-reference/statements/create/table.md#create-default-values) вместо [NULL](../../sql-reference/syntax.md#null-literal) в столбцы, которые не позволяют [хранить NULL](../../sql-reference/data-types/nullable.md#data_type-nullable). +Если столбец не позволяет хранить `NULL` и эта настройка отключена, то вставка `NULL` приведет к возникновению исключения. Если столбец позволяет хранить `NULL`, то значения `NULL` вставляются независимо от этой настройки. + +Эта настройка используется для запросов [INSERT ... SELECT](../../sql-reference/statements/insert-into.md#insert_query_insert-select). При этом подзапросы `SELECT` могут объединяться с помощью `UNION ALL`. + +Возможные значения: + +- 0 — вставка `NULL` в столбец, не позволяющий хранить `NULL`, приведет к возникновению исключения. +- 1 — вместо `NULL` вставляется значение столбца по умолчанию. + +Значение по умолчанию: `1`. ## input_format_skip_unknown_fields {#settings-input-format-skip-unknown-fields} @@ -759,6 +793,38 @@ log_queries_min_type='EXCEPTION_WHILE_PROCESSING' log_query_threads=1 ``` +## log_comment {#settings-log-comment} + +Задаёт значение поля `log_comment` таблицы [system.query_log](../system-tables/query_log.md) и текст комментария в логе сервера. + +Может быть использована для улучшения читабельности логов сервера. Кроме того, помогает быстро выделить связанные с тестом запросы из `system.query_log` после запуска [clickhouse-test](../../development/tests.md). + +Возможные значения: + +- Любая строка не длиннее [max_query_size](#settings-max_query_size). При превышении длины сервер сгенерирует исключение. + +Значение по умолчанию: пустая строка. + +**Пример** + +Запрос: + +``` sql +SET log_comment = 'log_comment test', log_queries = 1; +SELECT 1; +SYSTEM FLUSH LOGS; +SELECT type, query FROM system.query_log WHERE log_comment = 'log_comment test' AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 2; +``` + +Результат: + +``` text +┌─type────────┬─query─────┐ +│ QueryStart │ SELECT 1; │ +│ QueryFinish │ SELECT 1; │ +└─────────────┴───────────┘ +``` + ## max_insert_block_size {#settings-max_insert_block_size} Формировать блоки указанного размера, при вставке в таблицу. @@ -812,8 +878,6 @@ log_query_threads=1 Значение по умолчанию: количество процессорных ядер без учёта Hyper-Threading. -Если на сервере обычно исполняется менее одного запроса SELECT одновременно, то выставите этот параметр в значение чуть меньше количества реальных процессорных ядер. 
-
Для запросов, которые быстро завершаются из-за LIMIT-а, имеет смысл выставить max_threads поменьше. Например, если нужное количество записей находится в каждом блоке, то при max_threads = 8 будет считано 8 блоков, хотя достаточно было прочитать один.

Чем меньше `max_threads`, тем меньше будет использоваться оперативки.

@@ -1721,7 +1785,7 @@ ClickHouse генерирует исключение

## background_pool_size {#background_pool_size}

-Задает количество потоков для выполнения фоновых операций в движках таблиц (например, слияния в таблицах c движком [MergeTree](../../engines/table-engines/mergetree-family/index.md)). Настройка применяется при запуске сервера ClickHouse и не может быть изменена во пользовательском сеансе. Настройка позволяет управлять загрузкой процессора и диска. Чем меньше пулл, тем ниже нагрузка на CPU и диск, при этом фоновые процессы замедляются, что может повлиять на скорость выполнения запроса.
+Задает количество потоков для выполнения фоновых операций в движках таблиц (например, слияния в таблицах с движком [MergeTree](../../engines/table-engines/mergetree-family/index.md)). Настройка применяется при запуске сервера ClickHouse и не может быть изменена в пользовательском сеансе. Настройка позволяет управлять загрузкой процессора и диска. Чем меньше пул, тем ниже нагрузка на CPU и диск, при этом фоновые процессы работают с меньшей интенсивностью, что в конечном итоге может повлиять на производительность запросов, потому что сервер будет обрабатывать больше кусков.

Допустимые значения:

@@ -1760,6 +1824,19 @@ ClickHouse генерирует исключение

- [Движок Distributed](../../engines/table-engines/special/distributed.md#distributed)
- [Управление распределёнными таблицами](../../sql-reference/statements/system.md#query-language-system-distributed)

+## insert_distributed_one_random_shard {#insert_distributed_one_random_shard}
+
+Включает или отключает режим вставки данных в [Distributed](../../engines/table-engines/special/distributed.md#distributed) таблицу в случайный шард при отсутствии ключа шардирования.
+
+По умолчанию при вставке данных в `Distributed` таблицу с несколькими шардами и при отсутствии ключа шардирования сервер ClickHouse будет отклонять любой запрос на вставку данных. Когда `insert_distributed_one_random_shard = 1`, вставки принимаются, а данные записываются в случайный шард.
+
+Возможные значения:
+
+- 0 — если у таблицы несколько шардов, но ключ шардирования отсутствует, вставка данных отклоняется.
+- 1 — если ключ шардирования отсутствует, то вставка данных осуществляется в случайный шард среди всех доступных шардов.
+
+Значение по умолчанию: `0`.
+
## insert_shard_id {#insert_shard_id}

Если не `0`, указывает, в какой шард [Distributed](../../engines/table-engines/special/distributed.md#distributed) таблицы данные будут вставлены синхронно.

@@ -1990,6 +2067,16 @@ SELECT idx, i FROM null_in WHERE i IN (1, NULL) SETTINGS transform_null_in = 1;

Значение по умолчанию: 16.

+## background_fetches_pool_size {#background_fetches_pool_size}
+
+Задает количество потоков для скачивания кусков данных для [реплицируемых](../../engines/table-engines/mergetree-family/replication.md) таблиц. Настройка применяется при запуске сервера ClickHouse и не может быть изменена в пользовательском сеансе. Для использования в продакшене с частыми небольшими вставками или медленным кластером ZooKeeper рекомендуется использовать значение по умолчанию.
+
+Допустимые значения:
+
+- Положительное целое число.
+
+Значение по умолчанию: 8.
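Поскольку `background_fetches_pool_size` (как и `background_pool_size`) применяется при старте сервера из профиля по умолчанию, текущие значения удобно проверить запросом. Небольшой набросок:

``` sql
SELECT name, value
FROM system.settings
WHERE name IN ('background_pool_size', 'background_fetches_pool_size');
```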
+
 ## background_distributed_schedule_pool_size {#background_distributed_schedule_pool_size}
 
 Задает количество потоков для выполнения фоновых задач. Работает для таблиц с движком [Distributed](../../engines/table-engines/special/distributed.md). Настройка применяется при запуске сервера ClickHouse и не может быть изменена в пользовательском сеансе.
 
@@ -2645,6 +2732,28 @@ SELECT * FROM test2;
 
 Значение по умолчанию: `0`.
 
+## database_atomic_wait_for_drop_and_detach_synchronously {#database_atomic_wait_for_drop_and_detach_synchronously}
+
+Добавляет модификатор `SYNC` ко всем запросам `DROP` и `DETACH`.
+
+Возможные значения:
+
+- 0 — Запросы будут выполняться с задержкой.
+- 1 — Запросы будут выполняться без задержки.
+
+Значение по умолчанию: `0`.
+
+## show_table_uuid_in_table_create_query_if_not_nil {#show_table_uuid_in_table_create_query_if_not_nil}
+
+Определяет, отображается ли UUID таблицы в результате запроса `SHOW CREATE TABLE`.
+
+Возможные значения:
+
+- 0 — Запрос будет отображаться без UUID таблицы.
+- 1 — Запрос будет отображаться с UUID таблицы.
+
+Значение по умолчанию: `0`.
+
 ## allow_experimental_live_view {#allow-experimental-live-view}
 
 Включает экспериментальную возможность использования [LIVE-представлений](../../sql-reference/statements/create/view.md#live-view).
 
@@ -2655,7 +2764,6 @@ SELECT * FROM test2;
 
 Значение по умолчанию: `0`.
 
-
 ## live_view_heartbeat_interval {#live-view-heartbeat-interval}
 
 Задает интервал в секундах для периодической проверки существования [LIVE VIEW](../../sql-reference/statements/create/view.md#live-view).
 
@@ -2680,4 +2788,172 @@ SELECT * FROM test2;
 
 Значение по умолчанию: `60`.
 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/settings/settings/)
\ No newline at end of file
+## check_query_single_value_result {#check_query_single_value_result}
+
+Определяет уровень детализации результата для запросов [CHECK TABLE](../../sql-reference/statements/check-table.md#checking-mergetree-tables) для таблиц семейства `MergeTree`.
+
+Возможные значения:
+
+- 0 — запрос возвращает статус каждого куска данных таблицы.
+- 1 — запрос возвращает статус таблицы в целом.
+
+Значение по умолчанию: `0`.
+
+## prefer_column_name_to_alias {#prefer-column-name-to-alias}
+
+Включает или отключает использование исходных имен столбцов вместо псевдонимов (alias) в выражениях и секциях запросов, см. [Примечания по использованию псевдонимов](../../sql-reference/syntax.md#syntax-expression_aliases). Включите эту настройку, чтобы синтаксис псевдонимов в ClickHouse был более совместим с большинством других СУБД.
+
+Возможные значения:
+
+- 0 — псевдоним подставляется вместо имени столбца.
+- 1 — псевдоним не подставляется вместо имени столбца.
+
+Значение по умолчанию: `0`.
+
+**Пример**
+
+Какие изменения привносит включение и выключение настройки:
+
+Запрос:
+
+```sql
+SET prefer_column_name_to_alias = 0;
+SELECT avg(number) AS number, max(number) FROM numbers(10);
+```
+
+Результат:
+
+```text
+Received exception from server (version 21.5.1):
+Code: 184. DB::Exception: Received from localhost:9000. DB::Exception: Aggregate function avg(number) is found inside another aggregate function in query: While processing avg(number) AS number.
+```
+
+Запрос:
+
+```sql
+SET prefer_column_name_to_alias = 1;
+SELECT avg(number) AS number, max(number) FROM numbers(10);
+```
+
+Результат:
+
+```text
+┌─number─┬─max(number)─┐
+│    4.5 │           9 │
+└────────┴─────────────┘
+```
+
+## limit {#limit}
+
+Устанавливает максимальное количество строк, возвращаемых запросом.
Ограничивает сверху значение, установленное в запросе в секции [LIMIT](../../sql-reference/statements/select/limit.md#limit-clause). + +Возможные значения: + +- 0 — число строк не ограничено. +- Положительное целое число. + +Значение по умолчанию: `0`. + +## offset {#offset} + +Устанавливает количество строк, которые необходимо пропустить перед началом возврата строк из запроса. Суммируется со значением, установленным в запросе в секции [OFFSET](../../sql-reference/statements/select/offset.md#offset-fetch). + +Возможные значения: + +- 0 — строки не пропускаются. +- Положительное целое число. + +Значение по умолчанию: `0`. + +**Пример** + +Исходная таблица: + +``` sql +CREATE TABLE test (i UInt64) ENGINE = MergeTree() ORDER BY i; +INSERT INTO test SELECT number FROM numbers(500); +``` + +Запрос: + +``` sql +SET limit = 5; +SET offset = 7; +SELECT * FROM test LIMIT 10 OFFSET 100; +``` + +Результат: + +``` text +┌───i─┐ +│ 107 │ +│ 108 │ +│ 109 │ +└─────┘ +``` +## http_connection_timeout {#http_connection_timeout} + +Тайм-аут для HTTP-соединения (в секундах). + +Возможные значения: + +- 0 - бесконечный тайм-аут. +- Любое положительное целое число. + +Значение по умолчанию: `1`. + +## http_send_timeout {#http_send_timeout} + +Тайм-аут для отправки данных через HTTP-интерфейс (в секундах). + +Возможные значения: + +- 0 - бесконечный тайм-аут. +- Любое положительное целое число. + +Значение по умолчанию: `1800`. + +## http_receive_timeout {#http_receive_timeout} + +Тайм-аут для получения данных через HTTP-интерфейс (в секундах). + +Возможные значения: + +- 0 - бесконечный тайм-аут. +- Любое положительное целое число. + +Значение по умолчанию: `1800`. + +## optimize_fuse_sum_count_avg {#optimize_fuse_sum_count_avg} + +Позволяет объединить агрегатные функции с одинаковым аргументом. Запрос, содержащий по крайней мере две агрегатные функции: [sum](../../sql-reference/aggregate-functions/reference/sum.md#agg_function-sum), [count](../../sql-reference/aggregate-functions/reference/count.md#agg_function-count) или [avg](../../sql-reference/aggregate-functions/reference/avg.md#agg_function-avg) с одинаковым аргументом, перезаписывается как [sumCount](../../sql-reference/aggregate-functions/reference/sumcount.md#agg_function-sumCount). + +Возможные значения: + +- 0 — функции с одинаковым аргументом не объединяются. +- 1 — функции с одинаковым аргументом объединяются. + +Значение по умолчанию: `0`. + +**Пример** + +Запрос: + +``` sql +CREATE TABLE fuse_tbl(a Int8, b Int8) Engine = Log; +SET optimize_fuse_sum_count_avg = 1; +EXPLAIN SYNTAX SELECT sum(a), sum(b), count(b), avg(b) from fuse_tbl FORMAT TSV; +``` + +Результат: + +``` text +SELECT + sum(a), + sumCount(b).1, + sumCount(b).2, + (sumCount(b).1) / (sumCount(b).2) +FROM fuse_tbl +``` + +[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/settings/settings/) diff --git a/docs/ru/operations/system-tables/clusters.md b/docs/ru/operations/system-tables/clusters.md index ddc6849b44d..6bfeb8aa818 100644 --- a/docs/ru/operations/system-tables/clusters.md +++ b/docs/ru/operations/system-tables/clusters.md @@ -4,12 +4,68 @@ Столбцы: -- `cluster` (String) — имя кластера. -- `shard_num` (UInt32) — номер шарда в кластере, начиная с 1. -- `shard_weight` (UInt32) — относительный вес шарда при записи данных. -- `replica_num` (UInt32) — номер реплики в шарде, начиная с 1. -- `host_name` (String) — хост, указанный в конфигурации. -- `host_address` (String) — TIP-адрес хоста, полученный из DNS. 
-- `port` (UInt16) — порт, на который обращаться для соединения с сервером.
-- `user` (String) — имя пользователя, которого использовать для соединения с сервером.
+- `cluster` ([String](../../sql-reference/data-types/string.md)) — имя кластера.
+- `shard_num` ([UInt32](../../sql-reference/data-types/int-uint.md)) — номер шарда в кластере, начиная с 1.
+- `shard_weight` ([UInt32](../../sql-reference/data-types/int-uint.md)) — относительный вес шарда при записи данных.
+- `replica_num` ([UInt32](../../sql-reference/data-types/int-uint.md)) — номер реплики в шарде, начиная с 1.
+- `host_name` ([String](../../sql-reference/data-types/string.md)) — хост, указанный в конфигурации.
+- `host_address` ([String](../../sql-reference/data-types/string.md)) — IP-адрес хоста, полученный из DNS.
+- `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — порт для соединения с сервером.
+- `is_local` ([UInt8](../../sql-reference/data-types/int-uint.md)) — флаг, показывающий, является ли хост локальным.
+- `user` ([String](../../sql-reference/data-types/string.md)) — имя пользователя для соединения с сервером.
+- `default_database` ([String](../../sql-reference/data-types/string.md)) — имя базы данных по умолчанию.
+- `errors_count` ([UInt32](../../sql-reference/data-types/int-uint.md)) — количество неудачных попыток хоста получить доступ к реплике.
+- `slowdowns_count` ([UInt32](../../sql-reference/data-types/int-uint.md)) — количество замен реплики из-за долгого отсутствия ответа от нее при установке соединения при хеджированных запросах.
+- `estimated_recovery_time` ([UInt32](../../sql-reference/data-types/int-uint.md)) — количество секунд до момента, когда количество ошибок будет обнулено и реплика станет доступной.
+
+**Пример**
+
+Запрос:
+
+```sql
+SELECT * FROM system.clusters LIMIT 2 FORMAT Vertical;
+```
+
+Результат:
+
+```text
+Row 1:
+──────
+cluster: test_cluster_two_shards
+shard_num: 1
+shard_weight: 1
+replica_num: 1
+host_name: 127.0.0.1
+host_address: 127.0.0.1
+port: 9000
+is_local: 1
+user: default
+default_database:
+errors_count: 0
+slowdowns_count: 0
+estimated_recovery_time: 0
+
+Row 2:
+──────
+cluster: test_cluster_two_shards
+shard_num: 2
+shard_weight: 1
+replica_num: 1
+host_name: 127.0.0.2
+host_address: 127.0.0.2
+port: 9000
+is_local: 0
+user: default
+default_database:
+errors_count: 0
+slowdowns_count: 0
+estimated_recovery_time: 0
+```
+
+**Смотрите также**
+
+- [Table engine Distributed](../../engines/table-engines/special/distributed.md)
+- [Настройка distributed_replica_error_cap](../../operations/settings/settings.md#settings-distributed_replica_error_cap)
+- [Настройка distributed_replica_error_half_life](../../operations/settings/settings.md#settings-distributed_replica_error_half_life)
+
+[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/clusters)
diff --git a/docs/ru/operations/system-tables/columns.md b/docs/ru/operations/system-tables/columns.md
index af4cff85439..b8a0aef2299 100644
--- a/docs/ru/operations/system-tables/columns.md
+++ b/docs/ru/operations/system-tables/columns.md
@@ -4,7 +4,9 @@
 
 С помощью этой таблицы можно получить информацию аналогично запросу [DESCRIBE TABLE](../../sql-reference/statements/misc.md#misc-describe-table), но для многих таблиц сразу.
 
-Таблица `system.columns` содержит столбцы (тип столбца указан в скобках):
+Колонки [временных таблиц](../../sql-reference/statements/create/table.md#temporary-tables) содержатся в `system.columns` только в тех сессиях, в которых эти таблицы были созданы. Поле `database` у таких колонок пустое.
+
+Столбцы:
 
 - `database` ([String](../../sql-reference/data-types/string.md)) — имя базы данных.
 - `table` ([String](../../sql-reference/data-types/string.md)) — имя таблицы.
@@ -23,3 +25,46 @@
 - `is_in_sampling_key` ([UInt8](../../sql-reference/data-types/int-uint.md)) — флаг, показывающий включение столбца в ключ выборки.
 - `compression_codec` ([String](../../sql-reference/data-types/string.md)) — имя кодека сжатия.
 
+**Пример**
+
+```sql
+SELECT * FROM system.columns LIMIT 2 FORMAT Vertical;
+```
+
+```text
+Row 1:
+──────
+database: system
+table: aggregate_function_combinators
+name: name
+type: String
+default_kind:
+default_expression:
+data_compressed_bytes: 0
+data_uncompressed_bytes: 0
+marks_bytes: 0
+comment:
+is_in_partition_key: 0
+is_in_sorting_key: 0
+is_in_primary_key: 0
+is_in_sampling_key: 0
+compression_codec:
+
+Row 2:
+──────
+database: system
+table: aggregate_function_combinators
+name: is_internal
+type: UInt8
+default_kind:
+default_expression:
+data_compressed_bytes: 0
+data_uncompressed_bytes: 0
+marks_bytes: 0
+comment:
+is_in_partition_key: 0
+is_in_sorting_key: 0
+is_in_primary_key: 0
+is_in_sampling_key: 0
+compression_codec:
+```
diff --git a/docs/ru/operations/system-tables/dictionaries.md b/docs/ru/operations/system-tables/dictionaries.md
index 6a49904aae9..b865fea736f 100644
--- a/docs/ru/operations/system-tables/dictionaries.md
+++ b/docs/ru/operations/system-tables/dictionaries.md
@@ -21,6 +21,7 @@
 - `bytes_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Объем оперативной памяти, используемый словарем.
 - `query_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Количество запросов с момента загрузки словаря или с момента последней успешной перезагрузки.
 - `hit_rate` ([Float64](../../sql-reference/data-types/float.md)) — Для cache-словарей — процент закэшированных значений.
+- `found_rate` ([Float64](../../sql-reference/data-types/float.md)) — Процент обращений к словарю, при которых значение было найдено.
 - `element_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Количество элементов, хранящихся в словаре.
 - `load_factor` ([Float64](../../sql-reference/data-types/float.md)) — Процент заполнения словаря (для хэшированного словаря — процент заполнения хэш-таблицы).
 - `source` ([String](../../sql-reference/data-types/string.md)) — Текст, описывающий [источник данных](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) для словаря.
@@ -58,4 +59,3 @@ SELECT * FROM system.dictionaries
 │ dictdb │ dict │ LOADED │ dictdb.dict │ Flat │ UInt64 │ ['value_default','value_expression'] │ ['String','String'] │ 74032 │ 0 │ 1 │ 1 │ 0.0004887585532746823 │ ClickHouse: dictdb.dicttbl │ 0 │ 1 │ 2020-03-04 04:17:34 │ 2020-03-04 04:30:34 │ 0.002 │ │
 └──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┘───────────────────────┴────────────────┘
 ```
-
diff --git a/docs/ru/operations/system-tables/replication_queue.md b/docs/ru/operations/system-tables/replication_queue.md
index 56e8c695a21..2f9d80be16f 100644
--- a/docs/ru/operations/system-tables/replication_queue.md
+++ b/docs/ru/operations/system-tables/replication_queue.md
@@ -14,7 +14,17 @@
 
 - `node_name` ([String](../../sql-reference/data-types/string.md)) — имя узла в ZooKeeper.
 
-- `type` ([String](../../sql-reference/data-types/string.md)) — тип задачи в очереди: `GET_PARTS`, `MERGE_PARTS`, `DETACH_PARTS`, `DROP_PARTS` или `MUTATE_PARTS`.
+- `type` ([String](../../sql-reference/data-types/string.md)) — тип задачи в очереди:
+
+    - `GET_PART` — скачать кусок с другой реплики.
+    - `ATTACH_PART` — присоединить кусок. Задача может быть выполнена и с куском из нашей собственной реплики (если он находится в папке `detached`). Эта задача практически идентична задаче `GET_PART`, лишь немного оптимизирована.
+    - `MERGE_PARTS` — выполнить слияние кусков.
+    - `DROP_RANGE` — удалить куски в партициях из указанного диапазона.
+    - `CLEAR_COLUMN` — удалить указанный столбец из указанной партиции. Примечание: не используется с 20.4.
+    - `CLEAR_INDEX` — удалить указанный индекс из указанной партиции. Примечание: не используется с 20.4.
+    - `REPLACE_RANGE` — удалить указанный диапазон кусков и заменить их на новые.
+    - `MUTATE_PART` — применить одну или несколько мутаций к куску.
+    - `ALTER_METADATA` — применить изменения структуры таблицы в результате запросов с выражением `ALTER`.
 
 - `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — дата и время отправки задачи на выполнение.
 
@@ -77,4 +87,3 @@ last_postpone_time: 1970-01-01 03:00:00
 **Смотрите также**
 
 - [Управление таблицами ReplicatedMergeTree](../../sql-reference/statements/system.md#query-language-system-replicated)
-
diff --git a/docs/ru/operations/system-tables/stack_trace.md b/docs/ru/operations/system-tables/stack_trace.md
index 58d0a1c4b6a..338c14534cf 100644
--- a/docs/ru/operations/system-tables/stack_trace.md
+++ b/docs/ru/operations/system-tables/stack_trace.md
@@ -6,9 +6,10 @@
 
 Столбцы:
 
-- `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Идентификатор потока.
-- `query_id` ([String](../../sql-reference/data-types/string.md)) — Идентификатор запроса. Может быть использован для получения подробной информации о выполненном запросе из системной таблицы [query_log](#system_tables-query_log).
-- `trace` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — [Трассировка стека](https://en.wikipedia.org/wiki/Stack_trace). Представляет собой список физических адресов, по которым расположены вызываемые методы.
+- `thread_name` ([String](../../sql-reference/data-types/string.md)) — имя потока.
+- `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — идентификатор потока.
+- `query_id` ([String](../../sql-reference/data-types/string.md)) — идентификатор запроса. Может быть использован для получения подробной информации о выполненном запросе из системной таблицы [query_log](#system_tables-query_log). +- `trace` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — [трассировка стека](https://en.wikipedia.org/wiki/Stack_trace). Представляет собой список физических адресов, по которым расположены вызываемые методы. **Пример** @@ -21,12 +22,14 @@ SET allow_introspection_functions = 1; Получение символов из объектных файлов ClickHouse: ``` sql -WITH arrayMap(x -> demangle(addressToSymbol(x)), trace) AS all SELECT thread_id, query_id, arrayStringConcat(all, '\n') AS res FROM system.stack_trace LIMIT 1 \G +WITH arrayMap(x -> demangle(addressToSymbol(x)), trace) AS all SELECT thread_name, thread_id, query_id, arrayStringConcat(all, '\n') AS res FROM system.stack_trace LIMIT 1 \G; ``` ``` text Row 1: ────── +thread_name: clickhouse-serv + thread_id: 686 query_id: 1a11f70b-626d-47c1-b948-f9c7b206395d res: sigqueue @@ -51,12 +54,14 @@ __clone Получение имен файлов и номеров строк в исходном коде ClickHouse: ``` sql -WITH arrayMap(x -> addressToLine(x), trace) AS all, arrayFilter(x -> x LIKE '%/dbms/%', all) AS dbms SELECT thread_id, query_id, arrayStringConcat(notEmpty(dbms) ? dbms : all, '\n') AS res FROM system.stack_trace LIMIT 1 \G +WITH arrayMap(x -> addressToLine(x), trace) AS all, arrayFilter(x -> x LIKE '%/dbms/%', all) AS dbms SELECT thread_name, thread_id, query_id, arrayStringConcat(notEmpty(dbms) ? dbms : all, '\n') AS res FROM system.stack_trace LIMIT 1 \G; ``` ``` text Row 1: ────── +thread_name: clickhouse-serv + thread_id: 686 query_id: cad353e7-1c29-4b2e-949f-93e597ab7a54 res: /lib/x86_64-linux-gnu/libc-2.27.so @@ -78,10 +83,9 @@ res: /lib/x86_64-linux-gnu/libc-2.27.so /lib/x86_64-linux-gnu/libc-2.27.so ``` -**См. также** - -- [Функции интроспекции](../../sql-reference/functions/introspection.md) — Что такое функции интроспекции и как их использовать. -- [system.trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) — Содержит трассировки стека, собранные профилировщиком выборочных запросов. -- [arrayMap](../../sql-reference/functions/array-functions.md#array-map) — Описание и пример использования функции `arrayMap`. -- [arrayFilter](../../sql-reference/functions/array-functions.md#array-filter) — Описание и пример использования функции `arrayFilter`. +**Смотрите также** +- [Функции интроспекции](../../sql-reference/functions/introspection.md) — описание функций интроспекции и примеры использования. +- [system.trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) — системная таблица, содержащая трассировки стека, собранные профилировщиком выборочных запросов. +- [arrayMap](../../sql-reference/functions/array-functions.md#array-map) — описание и пример использования функции `arrayMap`. +- [arrayFilter](../../sql-reference/functions/array-functions.md#array-filter) — описание и пример использования функции `arrayFilter`. diff --git a/docs/ru/operations/system-tables/tables.md b/docs/ru/operations/system-tables/tables.md index 42e55b1f6b7..3dec1e7d940 100644 --- a/docs/ru/operations/system-tables/tables.md +++ b/docs/ru/operations/system-tables/tables.md @@ -1,39 +1,102 @@ # system.tables {#system-tables} -Содержит метаданные каждой таблицы, о которой знает сервер. Отсоединённые таблицы не отображаются в `system.tables`. +Содержит метаданные каждой таблицы, о которой знает сервер. 
-Эта таблица содержит следующие столбцы (тип столбца показан в скобках): +Отсоединённые таблицы ([DETACH](../../sql-reference/statements/detach.md)) не отображаются в `system.tables`. -- `database String` — имя базы данных, в которой находится таблица. -- `name` (String) — имя таблицы. -- `engine` (String) — движок таблицы (без параметров). -- `is_temporary` (UInt8) — флаг, указывающий на то, временная это таблица или нет. -- `data_path` (String) — путь к данным таблицы в файловой системе. -- `metadata_path` (String) — путь к табличным метаданным в файловой системе. -- `metadata_modification_time` (DateTime) — время последней модификации табличных метаданных. -- `dependencies_database` (Array(String)) — зависимости базы данных. -- `dependencies_table` (Array(String)) — табличные зависимости (таблицы [MaterializedView](../../engines/table-engines/special/materializedview.md), созданные на базе текущей таблицы). -- `create_table_query` (String) — запрос, которым создавалась таблица. -- `engine_full` (String) — параметры табличного движка. -- `partition_key` (String) — ключ партиционирования таблицы. -- `sorting_key` (String) — ключ сортировки таблицы. -- `primary_key` (String) - первичный ключ таблицы. -- `sampling_key` (String) — ключ сэмплирования таблицы. -- `storage_policy` (String) - политика хранения данных: +Информация о [временных таблицах](../../sql-reference/statements/create/table.md#temporary-tables) содержится в `system.tables` только в тех сессиях, в которых эти таблицы были созданы. Поле `database` у таких таблиц пустое, а флаг `is_temporary` включен. + +Столбцы: + +- `database` ([String](../../sql-reference/data-types/string.md)) — имя базы данных, в которой находится таблица. +- `name` ([String](../../sql-reference/data-types/string.md)) — имя таблицы. +- `engine` ([String](../../sql-reference/data-types/string.md)) — движок таблицы (без параметров). +- `is_temporary` ([UInt8](../../sql-reference/data-types/int-uint.md)) — флаг, указывающий на то, временная это таблица или нет. +- `data_path` ([String](../../sql-reference/data-types/string.md)) — путь к данным таблицы в файловой системе. +- `metadata_path` ([String](../../sql-reference/data-types/string.md)) — путь к табличным метаданным в файловой системе. +- `metadata_modification_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — время последней модификации табличных метаданных. +- `dependencies_database` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — зависимости базы данных. +- `dependencies_table` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — табличные зависимости (таблицы [MaterializedView](../../engines/table-engines/special/materializedview.md), созданные на базе текущей таблицы). +- `create_table_query` ([String](../../sql-reference/data-types/string.md)) — запрос, при помощи которого создавалась таблица. +- `engine_full` ([String](../../sql-reference/data-types/string.md)) — параметры табличного движка. +- `partition_key` ([String](../../sql-reference/data-types/string.md)) — ключ партиционирования таблицы. +- `sorting_key` ([String](../../sql-reference/data-types/string.md)) — ключ сортировки таблицы. +- `primary_key` ([String](../../sql-reference/data-types/string.md)) - первичный ключ таблицы. +- `sampling_key` ([String](../../sql-reference/data-types/string.md)) — ключ сэмплирования таблицы. 
+- `storage_policy` ([String](../../sql-reference/data-types/string.md)) - политика хранения данных: - [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes) - [Distributed](../../engines/table-engines/special/distributed.md#distributed) -- `total_rows` (Nullable(UInt64)) - общее количество строк, если есть возможность быстро определить точное количество строк в таблице, в противном случае `Null` (включая базовую таблицу `Buffer`). +- `total_rows` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - общее количество строк, если есть возможность быстро определить точное количество строк в таблице, в противном случае `NULL` (включая базовую таблицу `Buffer`). -- `total_bytes` (Nullable(UInt64)) - общее количество байт, если можно быстро определить точное количество байт для таблицы на накопителе, в противном случае `Null` (**не включает** в себя никакого базового хранилища). +- `total_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - общее количество байт, если можно быстро определить точное количество байт для таблицы на накопителе, в противном случае `NULL` (не включает в себя никакого базового хранилища). - Если таблица хранит данные на диске, возвращает используемое пространство на диске (т. е. сжатое). - Если таблица хранит данные в памяти, возвращает приблизительное количество используемых байт в памяти. -- `lifetime_rows` (Nullable(UInt64)) - общее количество строк, добавленных оператором `INSERT` с момента запуска сервера (только для таблиц `Buffer`). +- `lifetime_rows` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - общее количество строк, добавленных оператором `INSERT` с момента запуска сервера (только для таблиц `Buffer`). -- `lifetime_bytes` (Nullable(UInt64)) - общее количество байт, добавленных оператором `INSERT` с момента запуска сервера (только для таблиц `Buffer`). +- `lifetime_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - общее количество байт, добавленных оператором `INSERT` с момента запуска сервера (только для таблиц `Buffer`). + +- `comment` ([String](../../sql-reference/data-types/string.md)) — комментарий к таблице. Таблица `system.tables` используется при выполнении запроса `SHOW TABLES`. 
+**Пример** + +```sql +SELECT * FROM system.tables LIMIT 2 FORMAT Vertical; +``` + +```text +Row 1: +────── +database: base +name: t1 +uuid: 81b1c20a-b7c6-4116-a2ce-7583fb6b6736 +engine: MergeTree +is_temporary: 0 +data_paths: ['/var/lib/clickhouse/store/81b/81b1c20a-b7c6-4116-a2ce-7583fb6b6736/'] +metadata_path: /var/lib/clickhouse/store/461/461cf698-fd0b-406d-8c01-5d8fd5748a91/t1.sql +metadata_modification_time: 2021-01-25 19:14:32 +dependencies_database: [] +dependencies_table: [] +create_table_query: CREATE TABLE base.t1 (`n` UInt64) ENGINE = MergeTree ORDER BY n SETTINGS index_granularity = 8192 +engine_full: MergeTree ORDER BY n SETTINGS index_granularity = 8192 +partition_key: +sorting_key: n +primary_key: n +sampling_key: +storage_policy: default +total_rows: 1 +total_bytes: 99 +lifetime_rows: ᴺᵁᴸᴸ +lifetime_bytes: ᴺᵁᴸᴸ +comment: + +Row 2: +────── +database: default +name: 53r93yleapyears +uuid: 00000000-0000-0000-0000-000000000000 +engine: MergeTree +is_temporary: 0 +data_paths: ['/var/lib/clickhouse/data/default/53r93yleapyears/'] +metadata_path: /var/lib/clickhouse/metadata/default/53r93yleapyears.sql +metadata_modification_time: 2020-09-23 09:05:36 +dependencies_database: [] +dependencies_table: [] +create_table_query: CREATE TABLE default.`53r93yleapyears` (`id` Int8, `febdays` Int8) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192 +engine_full: MergeTree ORDER BY id SETTINGS index_granularity = 8192 +partition_key: +sorting_key: id +primary_key: id +sampling_key: +storage_policy: default +total_rows: 2 +total_bytes: 155 +lifetime_rows: ᴺᵁᴸᴸ +lifetime_bytes: ᴺᵁᴸᴸ +comment: +``` diff --git a/docs/ru/operations/system-tables/trace_log.md b/docs/ru/operations/system-tables/trace_log.md index 3d22e4eabfd..6d8130c1d00 100644 --- a/docs/ru/operations/system-tables/trace_log.md +++ b/docs/ru/operations/system-tables/trace_log.md @@ -18,10 +18,12 @@ ClickHouse создает эту таблицу когда утсановлен Во время соединения с сервером через `clickhouse-client`, вы видите строку похожую на `Connected to ClickHouse server version 19.18.1 revision 54429.`. Это поле содержит номер после `revision`, но не содержит строку после `version`. -- `timer_type`([Enum8](../../sql-reference/data-types/enum.md)) — тип таймера: +- `trace_type`([Enum8](../../sql-reference/data-types/enum.md)) — тип трассировки: - - `Real` означает wall-clock время. - - `CPU` означает относительное CPU время. + - `Real` — сбор трассировок стека адресов вызова по времени wall-clock. + - `CPU` — сбор трассировок стека адресов вызова по времени CPU. + - `Memory` — сбор выделенной памяти, когда ее размер превышает относительный инкремент. + - `MemorySample` — сбор случайно выделенной памяти. - `thread_number`([UInt32](../../sql-reference/data-types/int-uint.md)) — идентификатор треда. diff --git a/docs/ru/operations/tips.md b/docs/ru/operations/tips.md deleted file mode 100644 index 4535767e8e0..00000000000 --- a/docs/ru/operations/tips.md +++ /dev/null @@ -1,248 +0,0 @@ ---- -toc_priority: 58 -toc_title: "Советы по эксплуатации" ---- - -# Советы по эксплуатации {#sovety-po-ekspluatatsii} - -## CPU Scaling Governor {#cpu-scaling-governor} - -Всегда используйте `performance` scaling governor. `ondemand` scaling governor работает намного хуже при постоянно высоком спросе. - -``` bash -$ echo 'performance' | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor -``` - -## Ограничение CPU {#ogranichenie-cpu} - -Процессоры могут перегреваться. 
С помощью `dmesg` можно увидеть, если тактовая частота процессора была ограничена из-за перегрева. -Также ограничение может устанавливаться снаружи на уровне дата-центра. С помощью `turbostat` можно за этим наблюдать под нагрузкой. - -## Оперативная память {#operativnaia-pamiat} - -Для небольших объёмов данных (до ~200 Гб в сжатом виде) лучше всего использовать столько памяти не меньше, чем объём данных. -Для больших объёмов данных, при выполнении интерактивных (онлайн) запросов, стоит использовать разумный объём оперативной памяти (128 Гб или более) для того, чтобы горячее подмножество данных поместилось в кеше страниц. -Даже для объёмов данных в ~50 Тб на сервер, использование 128 Гб оперативной памяти намного лучше для производительности выполнения запросов, чем 64 Гб. - -Не выключайте overcommit. Значение `cat /proc/sys/vm/overcommit_memory` должно быть 0 or 1. Выполните: - -``` bash -$ echo 0 | sudo tee /proc/sys/vm/overcommit_memory -``` - -## Huge Pages {#huge-pages} - -Механизм прозрачных huge pages нужно отключить. Он мешает работе аллокаторов памяти, что приводит к значительной деградации производительности. - -``` bash -$ echo 'madvise' | sudo tee /sys/kernel/mm/transparent_hugepage/enabled -``` - -С помощью `perf top` можно наблюдать за временем, проведенном в ядре операционной системы для управления памятью. -Постоянные huge pages так же не нужно аллоцировать. - -## Подсистема хранения {#podsistema-khraneniia} - -Если ваш бюджет позволяет использовать SSD, используйте SSD. -В противном случае используйте HDD. SATA HDDs 7200 RPM подойдут. - -Предпочитайте много серверов с локальными жесткими дисками вместо меньшего числа серверов с подключенными дисковыми полками. -Но для хранения архивов с редкими запросами полки всё же подходят. - -## RAID {#raid} - -При использовании HDD можно объединить их RAID-10, RAID-5, RAID-6 или RAID-50. -Лучше использовать программный RAID в Linux (`mdadm`). Лучше не использовать LVM. -При создании RAID-10, нужно выбрать `far` расположение. -Если бюджет позволяет, лучше выбрать RAID-10. - -На более чем 4 дисках вместо RAID-5 нужно использовать RAID-6 (предпочтительнее) или RAID-50. -При использовании RAID-5, RAID-6 или RAID-50, нужно всегда увеличивать stripe_cache_size, так как значение по умолчанию выбрано не самым удачным образом. - -``` bash -$ echo 4096 | sudo tee /sys/block/md2/md/stripe_cache_size -``` - -Точное число стоит вычислять из числа устройств и размер блока по формуле: `2 * num_devices * chunk_size_in_bytes / 4096`. - -Размер блока в 1024 Кб подходит для всех конфигураций RAID. -Никогда не указывайте слишком маленький или слишком большой размер блока. - -На SSD можно использовать RAID-0. -Вне зависимости от использования RAID, всегда используйте репликацию для безопасности данных. - -Включите NCQ с длинной очередью. Для HDD стоит выбрать планировщик CFQ, а для SSD — noop. Не стоит уменьшать настройку readahead. -На HDD стоит включать кеш записи. - -## Файловая система {#failovaia-sistema} - -Ext4 самый проверенный вариант. Укажите опции монтирования `noatime,nobarrier`. -XFS также подходит, но не так тщательно протестирована в сочетании с ClickHouse. -Большинство других файловых систем также должны нормально работать. Файловые системы с отложенной аллокацией работают лучше. - -## Ядро Linux {#iadro-linux} - -Не используйте слишком старое ядро Linux. - -## Сеть {#set} - -При использовании IPv6, стоит увеличить размер кеша маршрутов. -Ядра Linux до 3.2 имели массу проблем в реализации IPv6. - -Предпочитайте как минимум 10 Гбит сеть. 
1 Гбит также будет работать, но намного хуже для починки реплик с десятками терабайт данных или для обработки распределенных запросов с большим объёмом промежуточных данных. - -## ZooKeeper {#zookeeper} - -Вероятно вы уже используете ZooKeeper для других целей. Можно использовать ту же инсталляцию ZooKeeper, если она не сильно перегружена. - -Лучше использовать свежую версию ZooKeeper, как минимум 3.4.9. Версия в стабильных дистрибутивах Linux может быть устаревшей. - -Никогда не используете написанные вручную скрипты для переноса данных между разными ZooKeeper кластерами, потому что результат будет некорректный для sequential нод. Никогда не используйте утилиту «zkcopy», по той же причине: https://github.com/ksprojects/zkcopy/issues/15 - -Если вы хотите разделить существующий ZooKeeper кластер на два, правильный способ - увеличить количество его реплик, а затем переконфигурировать его как два независимых кластера. - -Не запускайте ZooKeeper на тех же серверах, что и ClickHouse. Потому что ZooKeeper очень чувствителен к задержкам, а ClickHouse может использовать все доступные системные ресурсы. - -С настройками по умолчанию, ZooKeeper является бомбой замедленного действия: - -> Сервер ZooKeeper не будет удалять файлы со старыми снепшоты и логами при использовании конфигурации по умолчанию (см. autopurge), это является ответственностью оператора. - -Эту бомбу нужно обезвредить. - -Далее описана конфигурация ZooKeeper (3.5.1), используемая в боевом окружении Яндекс.Метрики на момент 20 мая 2017 года: - -zoo.cfg: - -``` bash -# http://hadoop.apache.org/zookeeper/docs/current/zookeeperAdmin.html - -# The number of milliseconds of each tick -tickTime=2000 -# The number of ticks that the initial -# synchronization phase can take -initLimit=30000 -# The number of ticks that can pass between -# sending a request and getting an acknowledgement -syncLimit=10 - -maxClientCnxns=2000 - -maxSessionTimeout=60000000 -# the directory where the snapshot is stored. -dataDir=/opt/zookeeper/{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }}/data -# Place the dataLogDir to a separate physical disc for better performance -dataLogDir=/opt/zookeeper/{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }}/logs - -autopurge.snapRetainCount=10 -autopurge.purgeInterval=1 - - -# To avoid seeks ZooKeeper allocates space in the transaction log file in -# blocks of preAllocSize kilobytes. The default block size is 64M. One reason -# for changing the size of the blocks is to reduce the block size if snapshots -# are taken more often. (Also, see snapCount). -preAllocSize=131072 - -# Clients can submit requests faster than ZooKeeper can process them, -# especially if there are a lot of clients. To prevent ZooKeeper from running -# out of memory due to queued requests, ZooKeeper will throttle clients so that -# there is no more than globalOutstandingLimit outstanding requests in the -# system. The default limit is 1,000.ZooKeeper logs transactions to a -# transaction log. After snapCount transactions are written to a log file a -# snapshot is started and a new transaction log file is started. The default -# snapCount is 10,000. -snapCount=3000000 - -# If this option is defined, requests will be will logged to a trace file named -# traceFile.year.month.day. -#traceFile= - -# Leader accepts client connections. Default value is "yes". The leader machine -# coordinates updates. For higher update throughput at thes slight expense of -# read throughput the leader can be configured to not accept clients and focus -# on coordination. 
-leaderServes=yes - -standaloneEnabled=false -dynamicConfigFile=/etc/zookeeper-{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }}/conf/zoo.cfg.dynamic -``` - -Версия Java: - -``` text -Java(TM) SE Runtime Environment (build 1.8.0_25-b17) -Java HotSpot(TM) 64-Bit Server VM (build 25.25-b02, mixed mode) -``` - -Параметры JVM: - -``` bash -NAME=zookeeper-{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }} -ZOOCFGDIR=/etc/$NAME/conf - -# TODO this is really ugly -# How to find out, which jars are needed? -# seems, that log4j requires the log4j.properties file to be in the classpath -CLASSPATH="$ZOOCFGDIR:/usr/build/classes:/usr/build/lib/*.jar:/usr/share/zookeeper/zookeeper-3.5.1-metrika.jar:/usr/share/zookeeper/slf4j-log4j12-1.7.5.jar:/usr/share/zookeeper/slf4j-api-1.7.5.jar:/usr/share/zookeeper/servlet-api-2.5-20081211.jar:/usr/share/zookeeper/netty-3.7.0.Final.jar:/usr/share/zookeeper/log4j-1.2.16.jar:/usr/share/zookeeper/jline-2.11.jar:/usr/share/zookeeper/jetty-util-6.1.26.jar:/usr/share/zookeeper/jetty-6.1.26.jar:/usr/share/zookeeper/javacc.jar:/usr/share/zookeeper/jackson-mapper-asl-1.9.11.jar:/usr/share/zookeeper/jackson-core-asl-1.9.11.jar:/usr/share/zookeeper/commons-cli-1.2.jar:/usr/src/java/lib/*.jar:/usr/etc/zookeeper" - -ZOOCFG="$ZOOCFGDIR/zoo.cfg" -ZOO_LOG_DIR=/var/log/$NAME -USER=zookeeper -GROUP=zookeeper -PIDDIR=/var/run/$NAME -PIDFILE=$PIDDIR/$NAME.pid -SCRIPTNAME=/etc/init.d/$NAME -JAVA=/usr/bin/java -ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain" -ZOO_LOG4J_PROP="INFO,ROLLINGFILE" -JMXLOCALONLY=false -JAVA_OPTS="-Xms{{ '{{' }} cluster.get('xms','128M') {{ '{{' }} '}}' }} \ - -Xmx{{ '{{' }} cluster.get('xmx','1G') {{ '{{' }} '}}' }} \ - -Xloggc:/var/log/$NAME/zookeeper-gc.log \ - -XX:+UseGCLogFileRotation \ - -XX:NumberOfGCLogFiles=16 \ - -XX:GCLogFileSize=16M \ - -verbose:gc \ - -XX:+PrintGCTimeStamps \ - -XX:+PrintGCDateStamps \ - -XX:+PrintGCDetails - -XX:+PrintTenuringDistribution \ - -XX:+PrintGCApplicationStoppedTime \ - -XX:+PrintGCApplicationConcurrentTime \ - -XX:+PrintSafepointStatistics \ - -XX:+UseParNewGC \ - -XX:+UseConcMarkSweepGC \ --XX:+CMSParallelRemarkEnabled" -``` - -Salt init: - -``` text -description "zookeeper-{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }} centralized coordination service" - -start on runlevel [2345] -stop on runlevel [!2345] - -respawn - -limit nofile 8192 8192 - -pre-start script - [ -r "/etc/zookeeper-{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }}/conf/environment" ] || exit 0 - . /etc/zookeeper-{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }}/conf/environment - [ -d $ZOO_LOG_DIR ] || mkdir -p $ZOO_LOG_DIR - chown $USER:$GROUP $ZOO_LOG_DIR -end script - -script - . /etc/zookeeper-{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }}/conf/environment - [ -r /etc/default/zookeeper ] && . 
/etc/default/zookeeper
-    if [ -z "$JMXDISABLE" ]; then
-        JAVA_OPTS="$JAVA_OPTS -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY"
-    fi
-    exec start-stop-daemon --start -c $USER --exec $JAVA --name zookeeper-{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }} \
-        -- -cp $CLASSPATH $JAVA_OPTS -Dzookeeper.log.dir=${ZOO_LOG_DIR} \
-        -Dzookeeper.root.logger=${ZOO_LOG4J_PROP} $ZOOMAIN $ZOOCFG
-end script
-```
-
diff --git a/docs/ru/operations/tips.md b/docs/ru/operations/tips.md
new file mode 120000
index 00000000000..9b3413bdbc3
--- /dev/null
+++ b/docs/ru/operations/tips.md
@@ -0,0 +1 @@
+../../en/operations/tips.md
\ No newline at end of file
diff --git a/docs/ru/operations/update.md b/docs/ru/operations/update.md
index 5c187ed1604..a3e87b52ede 100644
--- a/docs/ru/operations/update.md
+++ b/docs/ru/operations/update.md
@@ -3,7 +3,7 @@ toc_priority: 47
 toc_title: "Обновление ClickHouse"
 ---
 
-# Обновление ClickHouse {#obnovlenie-clickhouse}
+# Обновление ClickHouse {#clickhouse-upgrade}
 
 Если ClickHouse установлен с помощью deb-пакетов, выполните следующие команды на сервере:
 
@@ -15,4 +15,17 @@ $ sudo service clickhouse-server restart
 
 Если ClickHouse установлен не из рекомендуемых deb-пакетов, используйте соответствующий метод обновления.
 
-ClickHouse не поддерживает распределенное обновление. Операция должна выполняться последовательно на каждом отдельном сервере. Не обновляйте все серверы в кластере одновременно, иначе кластер становится недоступным в течение некоторого времени.
+!!! note "Примечание"
+    Вы можете обновить сразу несколько серверов, кроме случая, когда все реплики одного шарда отключены.
+
+Обновление ClickHouse до определенной версии:
+
+**Пример**
+
+`xx.yy.a.b` — это номер текущей стабильной версии. Последнюю стабильную версию можно узнать [здесь](https://github.com/ClickHouse/ClickHouse/releases).
+
+```bash
+$ sudo apt-get update
+$ sudo apt-get install clickhouse-server=xx.yy.a.b clickhouse-client=xx.yy.a.b clickhouse-common-static=xx.yy.a.b
+$ sudo service clickhouse-server restart
+```
diff --git a/docs/ru/operations/utilities/clickhouse-compressor.md b/docs/ru/operations/utilities/clickhouse-compressor.md
new file mode 100644
index 00000000000..d7f6862a62c
--- /dev/null
+++ b/docs/ru/operations/utilities/clickhouse-compressor.md
@@ -0,0 +1,27 @@
+## ClickHouse compressor
+
+Simple program for data compression and decompression in the ClickHouse way.
+
+### Examples
+
+Compress data with LZ4:
+```
+$ ./clickhouse-compressor < input_file > output_file
+```
+
+Decompress data from the LZ4 format:
+```
+$ ./clickhouse-compressor --decompress < input_file > output_file
+```
+
+Compress data with ZSTD at level 5:
+
+```
+$ ./clickhouse-compressor --codec 'ZSTD(5)' < input_file > output_file
+```
+
+Compress data with Delta of four bytes and ZSTD level 10:
+
+```
+$ ./clickhouse-compressor --codec 'Delta(4)' --codec 'ZSTD(10)' < input_file > output_file
+```
diff --git a/docs/ru/operations/utilities/clickhouse-format.md b/docs/ru/operations/utilities/clickhouse-format.md
new file mode 100644
index 00000000000..43043fcc1d5
--- /dev/null
+++ b/docs/ru/operations/utilities/clickhouse-format.md
@@ -0,0 +1,98 @@
+---
+toc_priority: 65
+toc_title: clickhouse-format
+---
+
+# clickhouse-format {#clickhouse-format}
+
+Позволяет форматировать входящие запросы.
+
+Ключи:
+
+- `--help` или `-h` — выводит описание ключей.
+- `--hilite` — добавляет подсветку синтаксиса с экранированием символов.
+- `--oneline` — форматирование в одну строку.
+- `--quiet` или `-q` — проверяет синтаксис без вывода результата.
+- `--multiquery` или `-n` — поддерживает несколько запросов в одной строке.
+- `--obfuscate` — обфусцирует вместо форматирования.
+- `--seed <строка>` — задает строку, которая определяет результат обфускации.
+- `--backslash` — добавляет обратный слеш в конце каждой строки отформатированного запроса. Удобно использовать, если многострочный запрос скопирован из интернета или другого источника и его нужно выполнить из командной строки.
+
+## Примеры {#examples}
+
+1. Подсветка синтаксиса и форматирование в одну строку:
+
+```bash
+$ clickhouse-format --oneline --hilite <<< "SELECT sum(number) FROM numbers(5);"
+```
+
+Результат:
+
+```sql
+SELECT sum(number) FROM numbers(5)
+```
+
+2. Несколько запросов в одной строке:
+
+```bash
+$ clickhouse-format -n <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);"
+```
+
+Результат:
+
+```text
+SELECT *
+FROM
+(
+    SELECT 1 AS x
+    UNION ALL
+    SELECT 1
+    UNION DISTINCT
+    SELECT 3
+)
+;
+```
+
+3. Обфускация:
+
+```bash
+$ clickhouse-format --seed Hello --obfuscate <<< "SELECT cost_first_screen BETWEEN a AND b, CASE WHEN x >= 123 THEN y ELSE NULL END;"
+```
+
+Результат:
+
+```text
+SELECT treasury_mammoth_hazelnut BETWEEN nutmeg AND span, CASE WHEN chive >= 116 THEN switching ELSE ANYTHING END;
+```
+
+Тот же запрос с другой инициализацией обфускатора:
+
+```bash
+$ clickhouse-format --seed World --obfuscate <<< "SELECT cost_first_screen BETWEEN a AND b, CASE WHEN x >= 123 THEN y ELSE NULL END;"
+```
+
+Результат:
+
+```text
+SELECT horse_tape_summer BETWEEN folklore AND moccasins, CASE WHEN intestine >= 116 THEN nonconformist ELSE FORESTRY END;
+```
+
+4. Добавление обратного слеша:
+
+```bash
+$ clickhouse-format --backslash <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);"
+```
+
+Результат:
+
+```text
+SELECT * \
+FROM \
+( \
+    SELECT 1 AS x \
+    UNION ALL \
+    SELECT 1 \
+    UNION DISTINCT \
+    SELECT 3 \
+)
+```
diff --git a/docs/ru/operations/utilities/index.md b/docs/ru/operations/utilities/index.md
index fa257fb4b1a..88bd0305386 100644
--- a/docs/ru/operations/utilities/index.md
+++ b/docs/ru/operations/utilities/index.md
@@ -6,6 +6,10 @@ toc_title: "Обзор"
 
 # Утилиты ClickHouse {#utility-clickhouse}
 
-- [clickhouse-local](clickhouse-local.md)
+- [clickhouse-local](clickhouse-local.md) - позволяет выполнять SQL-запросы над данными без остановки сервера ClickHouse, подобно утилите `awk`.
 - [clickhouse-copier](clickhouse-copier.md) - копирует (и перешардирует) данные с одного кластера на другой.
-
+- [clickhouse-benchmark](../../operations/utilities/clickhouse-benchmark.md) — устанавливает соединение с сервером ClickHouse и запускает циклическое выполнение указанных запросов.
+- [clickhouse-format](../../operations/utilities/clickhouse-format.md) — позволяет форматировать входящие запросы.
+- [ClickHouse obfuscator](../../operations/utilities/clickhouse-obfuscator.md) — обфусцирует данные.
+- [ClickHouse compressor](../../operations/utilities/clickhouse-compressor.md) — упаковывает и распаковывает данные.
+- [clickhouse-odbc-bridge](../../operations/utilities/odbc-bridge.md) — прокси-сервер для ODBC.
diff --git a/docs/ru/operations/utilities/odbc-bridge.md b/docs/ru/operations/utilities/odbc-bridge.md
new file mode 100644
index 00000000000..39c796c10c1
--- /dev/null
+++ b/docs/ru/operations/utilities/odbc-bridge.md
@@ -0,0 +1,38 @@
+# clickhouse-odbc-bridge
+
+Simple HTTP server which works like a proxy for the ODBC driver. The main motivation
+was possible segfaults or other faults in ODBC implementations, which can
+crash the whole clickhouse-server process.
+
+This tool works via HTTP, not via pipes, shared memory, or TCP because:
+- It's simpler to implement
+- It's simpler to debug
+- jdbc-bridge can be implemented in the same way
+
+## Usage
+
+`clickhouse-server` uses this tool inside the odbc table function and StorageODBC.
+However, it can be used as a standalone tool from the command line with the following
+parameters in the POST request URL:
+- `connection_string` -- ODBC connection string.
+- `columns` -- columns in ClickHouse NamesAndTypesList format, name in backticks,
+  type as string. Name and type are space separated, rows separated with
+  newline.
+- `max_block_size` -- optional parameter, sets the maximum size of a single block.
+The query is sent in the POST body. The response is returned in RowBinary format.
+
+## Example:
+
+```bash
+$ clickhouse-odbc-bridge --http-port 9018 --daemon
+
+$ curl -d "query=SELECT PageID, ImpID, AdType FROM Keys ORDER BY PageID, ImpID" --data-urlencode "connection_string=DSN=ClickHouse;DATABASE=stat" --data-urlencode "columns=columns format version: 1
+3 columns:
+\`PageID\` String
+\`ImpID\` String
+\`AdType\` String
+" "http://localhost:9018/" > result.txt
+
+$ cat result.txt # Result in RowBinary format
+12246623837185725195925621517
+```
diff --git a/docs/ru/sql-reference/aggregate-functions/combinators.md b/docs/ru/sql-reference/aggregate-functions/combinators.md
index eb52fa9bc75..74f9d1c1c05 100644
--- a/docs/ru/sql-reference/aggregate-functions/combinators.md
+++ b/docs/ru/sql-reference/aggregate-functions/combinators.md
@@ -27,6 +27,40 @@ toc_title: "Комбинаторы агрегатных функций"
 
 Комбинаторы -If и -Array можно сочетать. При этом, должен сначала идти Array, а потом If. Примеры: `uniqArrayIf(arr, cond)`, `quantilesTimingArrayIf(level1, level2)(arr, cond)`. Из-за такого порядка получается, что аргумент cond не должен быть массивом.
 
+## -SimpleState {#agg-functions-combinator-simplestate}
+
+При использовании этого комбинатора агрегатная функция возвращает то же значение, но типа [SimpleAggregateFunction(...)](../../sql-reference/data-types/simpleaggregatefunction.md). Текущее значение функции может храниться в таблице для последующей работы с таблицами семейства [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md).
+
+**Синтаксис**
+
+``` sql
+SimpleState(x)
+```
+
+**Аргументы**
+
+- `x` — параметры агрегатной функции.
+
+**Возвращаемое значение**
+
+Значение агрегатной функции типа `SimpleAggregateFunction(...)`.
+
+**Пример**
+
+Запрос:
+
+``` sql
+WITH anySimpleState(number) AS c SELECT toTypeName(c), c FROM numbers(1);
+```
+
+Результат:
+
+``` text
+┌─toTypeName(c)────────────────────────┬─c─┐
+│ SimpleAggregateFunction(any, UInt64) │ 0 │
+└──────────────────────────────────────┴───┘
+```
+
 ## -State {#state}
 
 В случае применения этого комбинатора, агрегатная функция возвращает не готовое значение (например, в случае функции [uniq](reference/uniq.md#agg_function-uniq) — количество уникальных значений), а промежуточное состояние агрегации (например, в случае функции `uniq` — хэш-таблицу для расчёта количества уникальных значений), которое имеет тип `AggregateFunction(...)` и может использоваться для дальнейшей обработки или может быть сохранено в таблицу для последующей доагрегации.
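+
+Например, промежуточные состояния, полученные с помощью `uniqState`, можно объединить комбинатором `-Merge` (упрощённый иллюстративный пример):
+
+``` sql
+SELECT uniqMerge(state) AS result
+FROM
+(
+    SELECT uniqState(number) AS state
+    FROM numbers(1000)
+    GROUP BY number % 10
+)
+```
+
+Результат:
+
+``` text
+┌─result─┐
+│   1000 │
+└────────┘
+```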
@@ -247,4 +281,3 @@ FROM people
 │ [3,2] │ [11.5,12.949999809265137] │
 └────────┴───────────────────────────┘
 ```
-
diff --git a/docs/ru/sql-reference/aggregate-functions/parametric-functions.md b/docs/ru/sql-reference/aggregate-functions/parametric-functions.md
index e5162b63b88..508c8de2a58 100644
--- a/docs/ru/sql-reference/aggregate-functions/parametric-functions.md
+++ b/docs/ru/sql-reference/aggregate-functions/parametric-functions.md
@@ -253,7 +253,7 @@ windowFunnel(window, [mode, [mode, ... ]])(timestamp, cond1, cond2, ..., condN)
 
 **Параметры**
 
-- `window` — ширина скользящего окна по времени. Единица измерения зависит от `timestamp` и может варьироваться. Должно соблюдаться условие `timestamp события cond2 <= timestamp события cond1 + window`.
+- `window` — ширина скользящего окна по времени. Это время между первым и последним условием. Единица измерения зависит от `timestamp` и может варьироваться. Должно соблюдаться условие `timestamp события cond1 <= timestamp события cond2 <= ... <= timestamp события condN <= timestamp события cond1 + window`.
 - `mode` — необязательный параметр. Может быть установлено несколько значений одновременно.
     - `'strict'` — не учитывать подряд идущие повторяющиеся события.
    - `'strict_order'` — запрещает посторонние события в искомой последовательности. Например, при поиске цепочки `A->B->C` в `A->B->D->C` поиск будет остановлен на `D` и функция вернет 2.
@@ -311,7 +311,7 @@ FROM
     GROUP BY user_id
 )
 GROUP BY level
-ORDER BY level ASC
+ORDER BY level ASC;
 ```
 
 ## retention {#retention}
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/deltasum.md b/docs/ru/sql-reference/aggregate-functions/reference/deltasum.md
index b025a248f3c..6825847f256 100644
--- a/docs/ru/sql-reference/aggregate-functions/reference/deltasum.md
+++ b/docs/ru/sql-reference/aggregate-functions/reference/deltasum.md
@@ -6,6 +6,9 @@ toc_priority: 141
 
 Суммирует арифметическую разницу между последовательными строками. Если разница отрицательна — она будет проигнорирована.
 
+!!! info "Примечание"
+    Чтобы эта функция работала должным образом, исходные данные должны быть отсортированы. В [материализованном представлении](../../../sql-reference/statements/create/view.md#materialized) вместо нее рекомендуется использовать [deltaSumTimestamp](../../../sql-reference/aggregate-functions/reference/deltasumtimestamp.md#agg_functions-deltasumtimestamp).
+
 **Синтаксис**
 
 ``` sql
@@ -18,7 +21,8 @@ deltaSum(value)
 
 **Возвращаемое значение**
 
-- накопленная арифметическая разница, типа `Integer` или `Float`.
+- Накопленная арифметическая разница.
+
+Тип: `Integer` или `Float`.
 
 **Примеры**
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/deltasumtimestamp.md b/docs/ru/sql-reference/aggregate-functions/reference/deltasumtimestamp.md
new file mode 100644
index 00000000000..10294eb9e6d
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/deltasumtimestamp.md
@@ -0,0 +1,45 @@
+---
+toc_priority: 141
+---
+
+# deltaSumTimestamp {#agg_functions-deltasumtimestamp}
+
+Суммирует разницу между последовательными строками. Если разница отрицательна — она будет проигнорирована.
+
+Эта функция предназначена в первую очередь для [материализованных представлений](../../../sql-reference/statements/create/view.md#materialized), упорядоченных по некоторому временному бакету согласно timestamp, например, по бакету `toStartOfMinute`. Поскольку строки в таком материализованном представлении будут иметь одинаковый timestamp, невозможно объединить их в правильном порядке.
Функция отслеживает `timestamp` наблюдаемых значений, поэтому возможно правильно упорядочить состояния во время слияния. + +Чтобы вычислить разницу между упорядоченными последовательными строками, вы можете использовать функцию [deltaSum](../../../sql-reference/aggregate-functions/reference/deltasum.md#agg_functions-deltasum) вместо функции `deltaSumTimestamp`. + +**Синтаксис** + +``` sql +deltaSumTimestamp(value, timestamp) +``` + +**Аргументы** + +- `value` — входные значения, должны быть типа [Integer](../../data-types/int-uint.md), или [Float](../../data-types/float.md), или [Date](../../data-types/date.md), или [DateTime](../../data-types/datetime.md). +- `timestamp` — параметр для упорядочивания значений, должен быть типа [Integer](../../data-types/int-uint.md), или [Float](../../data-types/float.md), или [Date](../../data-types/date.md), или [DateTime](../../data-types/datetime.md). + +**Возвращаемое значение** + +- Накопленная разница между последовательными значениями, упорядоченными по параметру `timestamp`. + +Тип: [Integer](../../data-types/int-uint.md), или [Float](../../data-types/float.md), или [Date](../../data-types/date.md), или [DateTime](../../data-types/datetime.md). + +**Пример** + +Запрос: + +```sql +SELECT deltaSumTimestamp(value, timestamp) +FROM (SELECT number AS timestamp, [0, 4, 8, 3, 0, 0, 0, 1, 3, 5][number] AS value FROM numbers(1, 10)); +``` + +Результат: + +``` text +┌─deltaSumTimestamp(value, timestamp)─┐ +│ 13 │ +└─────────────────────────────────────┘ +``` diff --git a/docs/ru/sql-reference/aggregate-functions/reference/max.md b/docs/ru/sql-reference/aggregate-functions/reference/max.md deleted file mode 100644 index 4f61ecd051d..00000000000 --- a/docs/ru/sql-reference/aggregate-functions/reference/max.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -toc_priority: 3 ---- - -# max {#agg_function-max} - -Вычисляет максимум. - diff --git a/docs/ru/sql-reference/aggregate-functions/reference/max.md b/docs/ru/sql-reference/aggregate-functions/reference/max.md new file mode 120000 index 00000000000..ae47679c80e --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/max.md @@ -0,0 +1 @@ +../../../../en/sql-reference/aggregate-functions/reference/max.md \ No newline at end of file diff --git a/docs/ru/sql-reference/aggregate-functions/reference/min.md b/docs/ru/sql-reference/aggregate-functions/reference/min.md deleted file mode 100644 index 16dd577e790..00000000000 --- a/docs/ru/sql-reference/aggregate-functions/reference/min.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -toc_priority: 2 ---- - -## min {#agg_function-min} - -Вычисляет минимум. - diff --git a/docs/ru/sql-reference/aggregate-functions/reference/min.md b/docs/ru/sql-reference/aggregate-functions/reference/min.md new file mode 120000 index 00000000000..61417b347a8 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/min.md @@ -0,0 +1 @@ +../../../../en/sql-reference/aggregate-functions/reference/min.md \ No newline at end of file diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md index f7239be0ba5..6dce79d8a89 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md @@ -12,6 +12,9 @@ toc_priority: 208 Внутренние состояния функций `quantile*` не объединяются, если они используются в одном запросе. 
Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantiles](#quantiles), это повысит эффективность запроса. +!!! note "Примечание" + Использование `quantileTDigestWeighted` [не рекомендуется для небольших наборов данных](https://github.com/tdunning/t-digest/issues/167#issuecomment-828650275) и может привести к значительной ошибке. Рассмотрите возможность использования [`quantileTDigest`](../../../sql-reference/aggregate-functions/reference/quantiletdigest.md) в таких случаях. + **Синтаксис** ``` sql diff --git a/docs/ru/sql-reference/aggregate-functions/reference/rankCorr.md b/docs/ru/sql-reference/aggregate-functions/reference/rankCorr.md index c98e7b88bcf..73d0552fc6f 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/rankCorr.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/rankCorr.md @@ -1,4 +1,8 @@ -## rankCorr {#agg_function-rankcorr} +--- +toc_priority: 145 +--- + +# rankCorr {#agg_function-rankcorr} Вычисляет коэффициент ранговой корреляции. diff --git a/docs/ru/sql-reference/aggregate-functions/reference/sumcount.md b/docs/ru/sql-reference/aggregate-functions/reference/sumcount.md new file mode 100644 index 00000000000..0606b06fba0 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/sumcount.md @@ -0,0 +1,46 @@ +--- +toc_priority: 144 +--- + +# sumCount {#agg_function-sumCount} + +Вычисляет сумму чисел и одновременно подсчитывает количество строк. + +**Синтаксис** + +``` sql +sumCount(x) +``` + +**Аргументы** + +- `x` — Входное значение типа [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md), или [Decimal](../../../sql-reference/data-types/decimal.md). + +**Возвращаемое значение** + +- Кортеж из элементов `(sum, count)`, где `sum` — это сумма чисел и `count` — количество строк со значениями, отличными от `NULL`. + +Тип: [Tuple](../../../sql-reference/data-types/tuple.md). + +**Пример** + +Запрос: + +``` sql +CREATE TABLE s_table (x Nullable(Int8)) Engine = Log; +INSERT INTO s_table SELECT number FROM numbers(0, 20); +INSERT INTO s_table VALUES (NULL); +SELECT sumCount(x) from s_table; +``` + +Результат: + +``` text +┌─sumCount(x)─┐ +│ (190,20) │ +└─────────────┘ +``` + +**Смотрите также** + +- Настройка [optimize_fuse_sum_count_avg](../../../operations/settings/settings.md#optimize_fuse_sum_count_avg) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniqhll12.md b/docs/ru/sql-reference/aggregate-functions/reference/uniqhll12.md index 4002cc06383..7a421d419ae 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/uniqhll12.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/uniqhll12.md @@ -26,7 +26,7 @@ uniqHLL12(x[, ...]) - Использует алгоритм HyperLogLog для аппроксимации числа различных значений аргументов. - Используется 212 5-битовых ячеек. Размер состояния чуть больше 2.5 КБ. Результат не точный (ошибка до ~10%) для небольших множеств (<10K элементов). Однако для множеств большой кардинальности (10K - 100M) результат довольно точен (ошибка до ~1.6%). Начиная с 100M ошибка оценки будет только расти и для множеств огромной кардинальности (1B+ элементов) функция возвращает результат с очень большой неточностью. + Используется 2^12 5-битовых ячеек. Размер состояния чуть больше 2.5 КБ. Результат не точный (ошибка до ~10%) для небольших множеств (<10K элементов). Однако для множеств большой кардинальности (10K - 100M) результат довольно точен (ошибка до ~1.6%). 
Начиная с 100M ошибка оценки будет только расти и для множеств огромной кардинальности (1B+ элементов) функция возвращает результат с очень большой неточностью. - Результат детерминирован (не зависит от порядка выполнения запроса). diff --git a/docs/ru/sql-reference/data-types/datetime.md b/docs/ru/sql-reference/data-types/datetime.md index ebd780d0d7d..c9804f57c33 100644 --- a/docs/ru/sql-reference/data-types/datetime.md +++ b/docs/ru/sql-reference/data-types/datetime.md @@ -20,8 +20,7 @@ DateTime([timezone]) ## Использование {#ispolzovanie} Момент времени сохраняется как [Unix timestamp](https://ru.wikipedia.org/wiki/Unix-%D0%B2%D1%80%D0%B5%D0%BC%D1%8F), независимо от часового пояса и переходов на летнее/зимнее время. Дополнительно, тип `DateTime` позволяет хранить часовой пояс, единый для всей колонки, который влияет на то, как будут отображаться значения типа `DateTime` в текстовом виде и как будут парситься значения, заданные в виде строк (‘2020-01-01 05:00:01’). Часовой пояс не хранится в строках таблицы (выборки), а хранится в метаданных колонки. -Список поддерживаемых временных зон можно найти в [IANA Time Zone Database](https://www.iana.org/time-zones). -Пакет `tzdata`, содержащий [базу данных часовых поясов IANA](https://www.iana.org/time-zones), должен быть установлен в системе. Используйте команду `timedatectl list-timezones` для получения списка часовых поясов, известных локальной системе. +Список поддерживаемых часовых поясов можно найти в [IANA Time Zone Database](https://www.iana.org/time-zones) или получить из базы данных, выполнив запрос `SELECT * FROM system.time_zones`. Также [список](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) есть в Википедии. Часовой пояс для столбца типа `DateTime` можно в явном виде установить при создании таблицы. Если часовой пояс не установлен, то ClickHouse использует значение параметра [timezone](../../sql-reference/data-types/datetime.md#server_configuration_parameters-timezone), установленное в конфигурации сервера или в настройках операционной системы на момент запуска сервера. diff --git a/docs/ru/sql-reference/data-types/datetime64.md b/docs/ru/sql-reference/data-types/datetime64.md index 6576bf9dc0d..3a08da75bb7 100644 --- a/docs/ru/sql-reference/data-types/datetime64.md +++ b/docs/ru/sql-reference/data-types/datetime64.md @@ -7,9 +7,9 @@ toc_title: DateTime64 Позволяет хранить момент времени, который может быть представлен как календарная дата и время, с заданной суб-секундной точностью. -Размер тика/точность: 10^(-precision) секунд, где precision - целочисленный параметр типа. +Размер тика (точность, precision): 10^(-precision) секунд, где precision — целочисленный параметр. -Синтаксис: +**Синтаксис:** ``` sql DateTime64(precision, [timezone]) @@ -17,9 +17,11 @@ DateTime64(precision, [timezone]) Данные хранятся в виде количества ‘тиков’, прошедших с момента начала эпохи (1970-01-01 00:00:00 UTC), в Int64. Размер тика определяется параметром precision. Дополнительно, тип `DateTime64` позволяет хранить часовой пояс, единый для всей колонки, который влияет на то, как будут отображаться значения типа `DateTime64` в текстовом виде и как будут парситься значения, заданные в виде строк (‘2020-01-01 05:00:01.000’). Часовой пояс не хранится в строках таблицы (выборки), а хранится в метаданных колонки. Подробнее см. [DateTime](datetime.md). -## Пример {#primer} +Поддерживаются значения от 1 января 1925 г. до 31 декабря 2283 г. -**1.** Создание таблицы с столбцом типа `DateTime64` и вставка данных в неё: +## Примеры {#examples} + +1.
Создание таблицы со столбцом типа `DateTime64` и вставка данных в неё: ``` sql CREATE TABLE dt ( @@ -27,15 +29,15 @@ CREATE TABLE dt `timestamp` DateTime64(3, 'Europe/Moscow'), `event_id` UInt8 ) -ENGINE = TinyLog +ENGINE = TinyLog; ``` ``` sql -INSERT INTO dt Values (1546300800000, 1), ('2019-01-01 00:00:00', 2) +INSERT INTO dt Values (1546300800000, 1), ('2019-01-01 00:00:00', 2); ``` ``` sql -SELECT * FROM dt +SELECT * FROM dt; ``` ``` text @@ -46,12 +48,12 @@ SELECT * FROM dt ``` - При вставке даты-времени как числа (аналогично ‘Unix timestamp’), время трактуется как UTC. Unix timestamp `1546300800` в часовом поясе `Europe/London (UTC+0)` представляет время `'2019-01-01 00:00:00'`. Однако столбец `timestamp` имеет тип `DateTime('Europe/Moscow (UTC+3)')`, так что при выводе в виде строки время отобразится как `2019-01-01 03:00:00`. -- При вставке даты-времени в виде строки, время трактуется соответственно часовому поясу, установленному для колонки. `'2019-01-01 00:00:00'` трактуется как время по Москве (и в базу сохраняется `'2018-12-31 21:00:00'` в виде Unix Timestamp) +- При вставке даты-времени в виде строки, время трактуется соответственно часовому поясу, установленному для колонки. `'2019-01-01 00:00:00'` трактуется как время по Москве (и в базу сохраняется `'2018-12-31 21:00:00'` в виде Unix Timestamp). -**2.** Фильтрация по значениям даты-времени +2. Фильтрация по значениям даты и времени ``` sql -SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europe/Moscow') +SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europe/Moscow'); ``` ``` text @@ -60,12 +62,12 @@ SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europ └─────────────────────────┴──────────┘ ``` -В отличие от типа `DateTime`, `DateTime64` не конвертируется из строк автоматически +В отличие от типа `DateTime`, `DateTime64` не конвертируется из строк автоматически. -**3.** Получение часового пояса для значения типа `DateTime64`: +3. Получение часового пояса для значения типа `DateTime64`: ``` sql -SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS x +SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS x; ``` ``` text @@ -74,13 +76,13 @@ SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS └─────────────────────────┴────────────────────────────────┘ ``` -**4.** Конвертация часовых поясов +4. Конвертация часовых поясов ``` sql SELECT toDateTime64(timestamp, 3, 'Europe/London') as lon_time, toDateTime64(timestamp, 3, 'Europe/Moscow') as mos_time -FROM dt +FROM dt; ``` ``` text @@ -90,7 +92,7 @@ FROM dt └─────────────────────────┴─────────────────────────┘ ``` -## See Also {#see-also} +**Смотрите также** - [Функции преобразования типов](../../sql-reference/functions/type-conversion-functions.md) - [Функции для работы с датой и временем](../../sql-reference/functions/date-time-functions.md) diff --git a/docs/ru/sql-reference/data-types/simpleaggregatefunction.md b/docs/ru/sql-reference/data-types/simpleaggregatefunction.md index 0948153362b..7b81c577762 100644 --- a/docs/ru/sql-reference/data-types/simpleaggregatefunction.md +++ b/docs/ru/sql-reference/data-types/simpleaggregatefunction.md @@ -3,6 +3,8 @@ Хранит только текущее значение агрегатной функции и не сохраняет ее полное состояние, как это делает [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md).
Такая оптимизация может быть применена к функциям, которые обладают следующим свойством: результат применения функции `f` к набору строк `S1 UNION ALL S2` может быть получен путем применения `f` к отдельным частям набора строк, а затем повторного применения `f` к результатам: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. Это свойство гарантирует, что результатов частичной агрегации достаточно для вычисления комбинированной агрегации, поэтому хранить и обрабатывать какие-либо дополнительные данные не требуется. +Чтобы получить промежуточное значение, обычно используются агрегатные функции с суффиксом [-SimpleState](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-simplestate). + Поддерживаются следующие агрегатные функции: - [`any`](../../sql-reference/aggregate-functions/reference/any.md#agg_function-any) @@ -15,10 +17,12 @@ - [`groupBitOr`](../../sql-reference/aggregate-functions/reference/groupbitor.md#groupbitor) - [`groupBitXor`](../../sql-reference/aggregate-functions/reference/groupbitxor.md#groupbitxor) - [`groupArrayArray`](../../sql-reference/aggregate-functions/reference/grouparray.md#agg_function-grouparray) -- [`groupUniqArrayArray`](../../sql-reference/aggregate-functions/reference/groupuniqarray.md#groupuniqarray) +- [`groupUniqArrayArray`](../../sql-reference/aggregate-functions/reference/groupuniqarray.md) - [`sumMap`](../../sql-reference/aggregate-functions/reference/summap.md#agg_functions-summap) - [`minMap`](../../sql-reference/aggregate-functions/reference/minmap.md#agg_functions-minmap) - [`maxMap`](../../sql-reference/aggregate-functions/reference/maxmap.md#agg_functions-maxmap) +- [`argMin`](../../sql-reference/aggregate-functions/reference/argmin.md) +- [`argMax`](../../sql-reference/aggregate-functions/reference/argmax.md) !!! note "Примечание" Значения `SimpleAggregateFunction(func, Type)` отображаются и хранятся так же, как и `Type`, поэтому комбинаторы [-Merge](../../sql-reference/aggregate-functions/combinators.md#aggregate_functions_combinators-merge) и [-State](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-state) не требуются. diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md index 285982565c2..f1f4a8fae18 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md @@ -9,7 +9,7 @@ toc_title: "Хранение словарей в памяти" Рекомендуем [flat](#flat), [hashed](#dicts-external_dicts_dict_layout-hashed) и [complex_key_hashed](#complex-key-hashed). Скорость обработки словарей при этом максимальна. -Размещение с кэшированием не рекомендуется использовать из-за потенциально низкой производительности и сложностей в подборе оптимальных параметров. Читайте об этом подробнее в разделе «[cache](#cache)». +Размещение с кэшированием не рекомендуется использовать из-за потенциально низкой производительности и сложностей в подборе оптимальных параметров. Читайте об этом подробнее в разделе [cache](#cache). Повысить производительность словарей можно следующими способами: @@ -48,7 +48,7 @@ LAYOUT(LAYOUT_TYPE(param value)) -- layout settings ...
``` -## Способы размещения словарей в памяти {#sposoby-razmeshcheniia-slovarei-v-pamiati} +## Способы размещения словарей в памяти {#ways-to-store-dictionaries-in-memory} - [flat](#flat) - [hashed](#dicts-external_dicts_dict_layout-hashed) @@ -65,11 +65,11 @@ LAYOUT(LAYOUT_TYPE(param value)) -- layout settings ### flat {#flat} -Словарь полностью хранится в оперативной памяти в виде плоских массивов. Объём памяти, занимаемой словарём пропорционален размеру самого большого по размеру ключа. +Словарь полностью хранится в оперативной памяти в виде плоских массивов. Объём памяти, занимаемой словарём, пропорционален размеру самого большого ключа (по объему). -Ключ словаря имеет тип `UInt64` и его величина ограничена 500 000. Если при создании словаря обнаружен ключ больше, то ClickHouse бросает исключение и не создает словарь. +Ключ словаря имеет тип [UInt64](../../../sql-reference/data-types/int-uint.md) и его величина ограничена параметром `max_array_size` (значение по умолчанию — 500 000). Если при создании словаря обнаружен ключ больше, то ClickHouse бросает исключение и не создает словарь. Начальный размер плоских массивов словарей контролируется параметром `initial_array_size` (по умолчанию — 1024). -Поддерживаются все виды источников. При обновлении, данные (из файла, из таблицы) читаются целиком. +Поддерживаются все виды источников. При обновлении данные (из файла или из таблицы) считываются целиком. Этот метод обеспечивает максимальную производительность среди всех доступных способов размещения словаря. @@ -77,40 +77,52 @@ LAYOUT(LAYOUT_TYPE(param value)) -- layout settings ``` xml -<flat /> +<flat> +  <initial_array_size>50000</initial_array_size> +  <max_array_size>5000000</max_array_size> +</flat> ``` или ``` sql -LAYOUT(FLAT()) +LAYOUT(FLAT(INITIAL_ARRAY_SIZE 50000 MAX_ARRAY_SIZE 5000000)) ``` ### hashed {#dicts-external_dicts_dict_layout-hashed} -Словарь полностью хранится в оперативной памяти в виде хэш-таблиц. Словарь может содержать произвольное количество элементов с произвольными идентификаторами. На практике, количество ключей может достигать десятков миллионов элементов. +Словарь полностью хранится в оперативной памяти в виде хэш-таблиц. Словарь может содержать произвольное количество элементов с произвольными идентификаторами. На практике количество ключей может достигать десятков миллионов элементов. -Поддерживаются все виды источников. При обновлении, данные (из файла, из таблицы) читаются целиком. +Если `preallocate` имеет значение `true` (по умолчанию `false`), для хеш-таблицы будет заранее выделена память (это ускорит загрузку словаря). Используйте этот метод только в случае, если: + +- Источник поддерживает получение приблизительного количества элементов (пока эта возможность есть только у источника `ClickHouse`). +- В данных нет дубликатов (иначе это может увеличить объем используемой памяти хеш-таблицы). + +Поддерживаются все виды источников. При обновлении данные (из файла, из таблицы) читаются целиком. Пример конфигурации: ``` xml -<hashed /> +<hashed> +  <preallocate>0</preallocate> +</hashed> ``` или ``` sql -LAYOUT(HASHED()) +LAYOUT(HASHED(PREALLOCATE 0)) ``` ### sparse_hashed {#dicts-external_dicts_dict_layout-sparse_hashed} Аналогичен `hashed`, но при этом занимает меньше места в памяти и генерирует более высокую загрузку CPU. +Для этого типа размещения также можно задать `preallocate` в значении `true`. В данном случае это более важно, чем для типа `hashed`.
+ Пример конфигурации: ``` xml <sparse_hashed /> ``` @@ -122,7 +134,7 @@ LAYOUT(HASHED()) или ``` sql -LAYOUT(SPARSE_HASHED()) +LAYOUT(SPARSE_HASHED([PREALLOCATE 0])) ``` ### complex_key_hashed {#complex-key-hashed} @@ -326,7 +338,7 @@ LAYOUT(CACHE(SIZE_IN_CELLS 1000000000)) ``` sql LAYOUT(SSD_CACHE(BLOCK_SIZE 4096 FILE_SIZE 16777216 READ_BUFFER_SIZE 1048576 - PATH /var/lib/clickhouse/clickhouse_dictionaries/test_dict)) + PATH ./user_files/test_dict)) ``` ### complex_key_ssd_cache {#complex-key-ssd-cache} @@ -440,4 +452,3 @@ dictGetString('prefix', 'asn', tuple(IPv6StringToNum('2001:db8::1'))) Никакие другие типы не поддерживаются. Функция возвращает атрибут для префикса, соответствующего данному IP-адресу. Если есть перекрывающиеся префиксы, возвращается наиболее специфический. Данные должны полностью помещаться в оперативной памяти. - diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md index 9589353649d..1298f05eca0 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md @@ -86,3 +86,4 @@ SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source wher ... ``` +Для словарей `Cache`, `ComplexKeyCache`, `SSDCache` и `SSDComplexKeyCache` поддерживается как синхронное, так и асинхронное обновление. diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index e3816e78547..ff83eb425d0 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -53,7 +53,7 @@ SOURCE(SOURCE_TYPE(param1 val1 ... paramN valN)) -- Source configuration или ``` sql -SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated')) +SOURCE(FILE(path './user_files/os.tsv' format 'TabSeparated')) SETTINGS(format_csv_allow_single_quotes = 0) ``` @@ -69,6 +69,7 @@ SETTINGS(format_csv_allow_single_quotes = 0) - [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse) - [MongoDB](#dicts-external_dicts_dict_sources-mongodb) - [Redis](#dicts-external_dicts_dict_sources-redis) + - [PostgreSQL](#dicts-external_dicts_dict_sources-postgresql) ## Локальный файл {#dicts-external_dicts_dict_sources-local_file} @@ -86,7 +87,7 @@ SETTINGS(format_csv_allow_single_quotes = 0) или ``` sql -SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated')) +SOURCE(FILE(path './user_files/os.tsv' format 'TabSeparated')) ``` Поля настройки: diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md index 57f53390d1c..df4742fca45 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md @@ -3,7 +3,7 @@ toc_priority: 44 toc_title: "Ключ и поля словаря" --- -# Ключ и поля словаря {#kliuch-i-polia-slovaria} +# Ключ и поля словаря {#dictionary-key-and-fields} Секция `<structure>` описывает ключ словаря и поля, доступные для запросов. @@ -88,7 +88,7 @@ PRIMARY KEY Id - `PRIMARY KEY` – имя столбца с ключами.
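+Для наглядности — условный набросок DDL-запроса словаря с числовым ключом (имена `my_dict`, `my_table` и атрибут `value` здесь выбраны произвольно для иллюстрации): + +``` sql +-- имена словаря, таблицы и атрибута условные +CREATE DICTIONARY my_dict +( +    Id UInt64, +    value String +) +PRIMARY KEY Id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'my_table')) +LIFETIME(MIN 300 MAX 600) +LAYOUT(FLAT()); +```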
-### Составной ключ {#sostavnoi-kliuch} +### Составной ключ {#composite-key} Ключом может быть кортеж (`tuple`) из полей произвольных типов. В этом случае [layout](external-dicts-dict-layout.md) должен быть `complex_key_hashed` или `complex_key_cache`. @@ -159,13 +159,12 @@ CREATE DICTIONARY somename ( | Тег | Описание | Обязательный | |------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| | `name` | Имя столбца. | Да | -| `type` | Тип данных ClickHouse.
ClickHouse пытается привести значение из словаря к заданному типу данных. Например, в случае MySQL, в таблице-источнике поле может быть `TEXT`, `VARCHAR`, `BLOB`, но загружено может быть как `String`. [Nullable](../../../sql-reference/data-types/nullable.md) не поддерживается. | Да | -| `null_value` | Значение по умолчанию для несуществующего элемента.
В примере это пустая строка. Нельзя указать значение `NULL`. | Да | +| `type` | Тип данных ClickHouse.
ClickHouse пытается привести значение из словаря к заданному типу данных. Например, в случае MySQL, в таблице-источнике поле может быть `TEXT`, `VARCHAR`, `BLOB`, но загружено может быть как `String`.
[Nullable](../../../sql-reference/data-types/nullable.md) в настоящее время поддерживается для словарей [Flat](external-dicts-dict-layout.md#flat), [Hashed](external-dicts-dict-layout.md#dicts-external_dicts_dict_layout-hashed), [ComplexKeyHashed](external-dicts-dict-layout.md#complex-key-hashed), [Direct](external-dicts-dict-layout.md#direct), [ComplexKeyDirect](external-dicts-dict-layout.md#complex-key-direct), [RangeHashed](external-dicts-dict-layout.md#range-hashed), [Polygon](external-dicts-dict-polygon.md), [Cache](external-dicts-dict-layout.md#cache), [ComplexKeyCache](external-dicts-dict-layout.md#complex-key-cache), [SSDCache](external-dicts-dict-layout.md#ssd-cache), [SSDComplexKeyCache](external-dicts-dict-layout.md#complex-key-ssd-cache). Для словарей [IPTrie](external-dicts-dict-layout.md#ip-trie) `Nullable`-типы не поддерживаются. | Да | +| `null_value` | Значение по умолчанию для несуществующего элемента.
В примере это пустая строка. Значение [NULL](../../syntax.md#null-literal) можно указывать только для типов `Nullable` (см. предыдущую строку с описанием типов). | Да | | `expression` | [Выражение](../../syntax.md#syntax-expressions), которое ClickHouse выполняет со значением.
Выражением может быть имя столбца в удаленной SQL базе. Таким образом, вы можете использовать его для создания псевдонима удаленного столбца.

Значение по умолчанию: нет выражения. | Нет | -| `hierarchical` | Если `true`, то атрибут содержит ключ предка для текущего элемента. Смотрите [Иерархические словари](external-dicts-dict-hierarchical.md).

Default value: `false`. | No | +| `hierarchical` | Если `true`, то атрибут содержит ключ предка для текущего элемента. Смотрите [Иерархические словари](external-dicts-dict-hierarchical.md).

Значение по умолчанию: `false`. | Нет | | `is_object_id` | Признак того, что запрос выполняется к документу MongoDB по `ObjectID`.

Значение по умолчанию: `false`. | Нет | -## Смотрите также {#smotrite-takzhe} +**Смотрите также** - [Функции для работы с внешними словарями](../../../sql-reference/functions/ext-dict-functions.md). - diff --git a/docs/ru/sql-reference/dictionaries/index.md b/docs/ru/sql-reference/dictionaries/index.md index bd432497be8..84c6f1a3c13 100644 --- a/docs/ru/sql-reference/dictionaries/index.md +++ b/docs/ru/sql-reference/dictionaries/index.md @@ -10,10 +10,7 @@ toc_title: "Введение" ClickHouse поддерживает специальные функции для работы со словарями, которые можно использовать в запросах. Проще и эффективнее использовать словари с помощью функций, чем `JOIN` с таблицами-справочниками. -В словаре нельзя хранить значения [NULL](../../sql-reference/syntax.md#null-literal). - ClickHouse поддерживает: -- [Встроенные словари](internal-dicts.md#internal_dicts) со специфическим [набором функций](../../sql-reference/dictionaries/external-dictionaries/index.md). -- [Подключаемые (внешние) словари](external-dictionaries/external-dicts.md#dicts-external-dicts) с [набором функций](../../sql-reference/dictionaries/external-dictionaries/index.md). - +- [Встроенные словари](internal-dicts.md#internal_dicts) со специфическим [набором функций](../../sql-reference/functions/ym-dict-functions.md). +- [Подключаемые (внешние) словари](external-dictionaries/external-dicts.md#dicts-external-dicts) с [набором функций](../../sql-reference/functions/ext-dict-functions.md). \ No newline at end of file diff --git a/docs/ru/sql-reference/functions/array-functions.md b/docs/ru/sql-reference/functions/array-functions.md index 4538941a4a4..10fc91de205 100644 --- a/docs/ru/sql-reference/functions/array-functions.md +++ b/docs/ru/sql-reference/functions/array-functions.md @@ -1111,6 +1111,78 @@ SELECT Функция `arrayFilter` является [функцией высшего порядка](../../sql-reference/functions/index.md#higher-order-functions) — в качестве первого аргумента ей нужно передать лямбда-функцию, и этот аргумент не может быть опущен. +## arrayFill(func, arr1, …) {#array-fill} + +Перебирает `arr1` от первого элемента к последнему и заменяет `arr1[i]` на `arr1[i - 1]`, если `func` вернула 0. Первый элемент `arr1` остаётся неизменным. + +Примеры: + +``` sql +SELECT arrayFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5, 6, 14, null, null]) AS res +``` + +``` text +┌─res──────────────────────────────┐ +│ [1,1,3,11,12,12,12,5,6,14,14,14] │ +└──────────────────────────────────┘ +``` + +Функция `arrayFill` является [функцией высшего порядка](../../sql-reference/functions/index.md#higher-order-functions) — в качестве первого аргумента ей нужно передать лямбда-функцию, и этот аргумент не может быть опущен. + +## arrayReverseFill(func, arr1, …) {#array-reverse-fill} + +Перебирает `arr1` от последнего элемента к первому и заменяет `arr1[i]` на `arr1[i + 1]`, если `func` вернула 0. Последний элемент `arr1` остаётся неизменным. + +Примеры: + +``` sql +SELECT arrayReverseFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5, 6, 14, null, null]) AS res +``` + +``` text +┌─res────────────────────────────────┐ +│ [1,3,3,11,12,5,5,5,6,14,NULL,NULL] │ +└────────────────────────────────────┘ +``` + +Функция `arrayReverseFill` является [функцией высшего порядка](../../sql-reference/functions/index.md#higher-order-functions) — в качестве первого аргумента ей нужно передать лямбда-функцию, и этот аргумент не может быть опущен. + +## arraySplit(func, arr1, …) {#array-split} + +Разделяет массив `arr1` на несколько.
Если `func` возвращает не 0, массив разделяется по левую сторону от элемента, то есть элемент открывает новую часть. Массив не разбивается перед первым элементом. + +Примеры: + +``` sql +SELECT arraySplit((x, y) -> y, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]) AS res +``` + +``` text +┌─res─────────────┐ +│ [[1,2,3],[4,5]] │ +└─────────────────┘ +``` + +Функция `arraySplit` является [функцией высшего порядка](../../sql-reference/functions/index.md#higher-order-functions) — в качестве первого аргумента ей нужно передать лямбда-функцию, и этот аргумент не может быть опущен. + +## arrayReverseSplit(func, arr1, …) {#array-reverse-split} + +Разделяет массив `arr1` на несколько. Если `func` возвращает не 0, массив разделяется по правую сторону от элемента, то есть элемент закрывает текущую часть. Массив не разбивается после последнего элемента. + +Примеры: + +``` sql +SELECT arrayReverseSplit((x, y) -> y, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]) AS res +``` + +``` text +┌─res───────────────┐ +│ [[1],[2,3,4],[5]] │ +└───────────────────┘ +``` + +Функция `arrayReverseSplit` является [функцией высшего порядка](../../sql-reference/functions/index.md#higher-order-functions) — в качестве первого аргумента ей нужно передать лямбда-функцию, и этот аргумент не может быть опущен. + ## arrayExists(\[func,\] arr1, …) {#arrayexistsfunc-arr1} Возвращает 1, если существует хотя бы один элемент массива `arr`, для которого функция func возвращает не 0. Иначе возвращает 0. @@ -1137,7 +1209,7 @@ SELECT ## arrayMin {#array-min} -Возвращает значение минимального элемента в исходном массиве. +Возвращает значение минимального элемента в исходном массиве. Если передана функция `func`, возвращается минимум из элементов массива, преобразованных этой функцией. @@ -1192,7 +1264,7 @@ SELECT arrayMin(x -> (-x), [1, 2, 4]) AS res; ## arrayMax {#array-max} -Возвращает значение максимального элемента в исходном массиве. +Возвращает значение максимального элемента в исходном массиве. Если передана функция `func`, возвращается максимум из элементов массива, преобразованных этой функцией. @@ -1247,7 +1319,7 @@ SELECT arrayMax(x -> (-x), [1, 2, 4]) AS res; ## arraySum {#array-sum} -Возвращает сумму элементов в исходном массиве. +Возвращает сумму элементов в исходном массиве. Если передана функция `func`, возвращается сумма элементов массива, преобразованных этой функцией. @@ -1262,7 +1334,7 @@ arraySum([func,] arr) **Аргументы** - `func` — функция. [Expression](../../sql-reference/data-types/special-data-types/expression.md). -- `arr` — массив. [Array](../../sql-reference/data-types/array.md). +- `arr` — массив. [Array](../../sql-reference/data-types/array.md). **Возвращаемое значение** @@ -1302,7 +1374,7 @@ SELECT arraySum(x -> x*x, [2, 3]) AS res; ## arrayAvg {#array-avg} -Возвращает среднее значение элементов в исходном массиве. +Возвращает среднее значение элементов в исходном массиве. Если передана функция `func`, возвращается среднее значение элементов массива, преобразованных этой функцией. @@ -1317,7 +1389,7 @@ arrayAvg([func,] arr) **Аргументы** - `func` — функция. [Expression](../../sql-reference/data-types/special-data-types/expression.md). -- `arr` — массив. [Array](../../sql-reference/data-types/array.md). +- `arr` — массив. [Array](../../sql-reference/data-types/array.md). **Возвращаемое значение** @@ -1355,7 +1427,7 @@ SELECT arrayAvg(x -> (x * x), [2, 4]) AS res; └─────┘ ``` -**Синтаксис** +**Синтаксис** ``` sql arraySum(arr) @@ -1367,7 +1439,7 @@ arraySum(arr) Тип: [Int](../../sql-reference/data-types/int-uint.md) или [Float](../../sql-reference/data-types/float.md).
-**Аргументы** +**Аргументы** - `arr` — [массив](../../sql-reference/data-types/array.md). @@ -1456,3 +1528,52 @@ SELECT arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]); └──────────────────────────────────────────────┘ ``` +## arrayProduct {#arrayproduct} + +Возвращает произведение элементов [массива](../../sql-reference/data-types/array.md). + +**Синтаксис** + +``` sql +arrayProduct(arr) +``` + +**Аргументы** + +- `arr` — [массив](../../sql-reference/data-types/array.md) числовых значений. + +**Возвращаемое значение** + +- Произведение элементов массива. + +Тип: [Float64](../../sql-reference/data-types/float.md). + +**Примеры** + +Запрос: + +``` sql +SELECT arrayProduct([1,2,3,4,5,6]) as res; +``` + +Результат: + +``` text +┌─res───┐ +│ 720 │ +└───────┘ +``` + +Запрос: + +``` sql +SELECT arrayProduct([toDecimal64(1,8), toDecimal64(2,8), toDecimal64(3,8)]) as res, toTypeName(res); +``` + +Возвращаемое значение всегда имеет тип [Float64](../../sql-reference/data-types/float.md). Результат: + +``` text +┌─res─┬─toTypeName(arrayProduct(array(toDecimal64(1, 8), toDecimal64(2, 8), toDecimal64(3, 8))))─┐ +│ 6 │ Float64 │ +└─────┴──────────────────────────────────────────────────────────────────────────────────────────┘ +``` \ No newline at end of file diff --git a/docs/ru/sql-reference/functions/bit-functions.md b/docs/ru/sql-reference/functions/bit-functions.md index 09844685a6c..a5124e67235 100644 --- a/docs/ru/sql-reference/functions/bit-functions.md +++ b/docs/ru/sql-reference/functions/bit-functions.md @@ -240,3 +240,53 @@ SELECT bitCount(333); └───────────────┘ ``` +## bitHammingDistance {#bithammingdistance} + +Возвращает [расстояние Хэмминга](https://ru.wikipedia.org/wiki/%D0%A0%D0%B0%D1%81%D1%81%D1%82%D0%BE%D1%8F%D0%BD%D0%B8%D0%B5_%D0%A5%D1%8D%D0%BC%D0%BC%D0%B8%D0%BD%D0%B3%D0%B0) между битовыми представлениями двух целых чисел. Может быть использовано с функциями [SimHash](../../sql-reference/functions/hash-functions.md#ngramsimhash) для проверки двух строк на схожесть. Чем меньше расстояние, тем больше вероятность, что строки совпадают. + +**Синтаксис** + +``` sql +bitHammingDistance(int1, int2) +``` + +**Аргументы** + +- `int1` — первое целое число. [Int64](../../sql-reference/data-types/int-uint.md). +- `int2` — второе целое число. [Int64](../../sql-reference/data-types/int-uint.md). + +**Возвращаемое значение** + +- Расстояние Хэмминга. + +Тип: [UInt8](../../sql-reference/data-types/int-uint.md).
+ +**Примеры** + +Запрос: + +``` sql +SELECT bitHammingDistance(111, 121); +``` + +Результат: + +``` text +┌─bitHammingDistance(111, 121)─┐ +│ 3 │ +└──────────────────────────────┘ +``` + +Используя [SimHash](../../sql-reference/functions/hash-functions.md#ngramsimhash): + +``` sql +SELECT bitHammingDistance(ngramSimHash('cat ate rat'), ngramSimHash('rat ate cat')); +``` + +Результат: + +``` text +┌─bitHammingDistance(ngramSimHash('cat ate rat'), ngramSimHash('rat ate cat'))─┐ +│ 5 │ +└──────────────────────────────────────────────────────────────────────────────┘ +``` diff --git a/docs/ru/sql-reference/functions/bitmap-functions.md b/docs/ru/sql-reference/functions/bitmap-functions.md index ddae2f3eb40..3da729664d0 100644 --- a/docs/ru/sql-reference/functions/bitmap-functions.md +++ b/docs/ru/sql-reference/functions/bitmap-functions.md @@ -25,7 +25,7 @@ SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res); ``` text ┌─res─┬─toTypeName(bitmapBuild([1, 2, 3, 4, 5]))─────┐ -│  │ AggregateFunction(groupBitmap, UInt8) │ +│ │ AggregateFunction(groupBitmap, UInt8) │ └─────┴──────────────────────────────────────────────┘ ``` diff --git a/docs/ru/sql-reference/functions/date-time-functions.md b/docs/ru/sql-reference/functions/date-time-functions.md index 0adccbe888b..b442a782100 100644 --- a/docs/ru/sql-reference/functions/date-time-functions.md +++ b/docs/ru/sql-reference/functions/date-time-functions.md @@ -23,13 +23,53 @@ SELECT └─────────────────────┴────────────┴────────────┴─────────────────────┘ ``` +## timeZone {#timezone} + +Возвращает часовой пояс сервера. + +**Синтаксис** + +``` sql +timeZone() +``` + +Псевдоним: `timezone`. + +**Возвращаемое значение** + +- Часовой пояс. + +Тип: [String](../../sql-reference/data-types/string.md). + +Пример использования приведён ниже, после раздела [toTimeZone](#totimezone). + ## toTimeZone {#totimezone} -Переводит дату или дату-с-временем в указанный часовой пояс. Часовой пояс (таймзона) это атрибут типов Date/DateTime, внутреннее значение (количество секунд) поля таблицы или колонки результата не изменяется, изменяется тип поля и автоматически его текстовое отображение. +Переводит дату или дату с временем в указанный часовой пояс. Часовой пояс — это атрибут типов `Date` и `DateTime`. Внутреннее значение (количество секунд) поля таблицы или результирующего столбца не изменяется, изменяется тип поля и, соответственно, его текстовое отображение. + +**Синтаксис** + +``` sql +toTimeZone(value, timezone) +``` + +Псевдоним: `toTimezone`. + +**Аргументы** + +- `value` — время или дата с временем. [DateTime64](../../sql-reference/data-types/datetime64.md). +- `timezone` — часовой пояс для возвращаемого значения. [String](../../sql-reference/data-types/string.md). + +**Возвращаемое значение** + +- Дата с временем. + +Тип: [DateTime](../../sql-reference/data-types/datetime.md). + +**Пример** + +Запрос: ```sql -SELECT - toDateTime('2019-01-01 00:00:00', 'UTC') AS time_utc, +SELECT toDateTime('2019-01-01 00:00:00', 'UTC') AS time_utc, toTypeName(time_utc) AS type_utc, toInt32(time_utc) AS int32utc, toTimeZone(time_utc, 'Asia/Yekaterinburg') AS time_yekat, @@ -40,6 +80,7 @@ SELECT toInt32(time_samoa) AS int32samoa FORMAT Vertical; ``` +Результат: ```text Row 1: @@ -57,6 +98,82 @@ int32samoa: 1546300800 `toTimeZone(time_utc, 'Asia/Yekaterinburg')` изменяет тип `DateTime('UTC')` на `DateTime('Asia/Yekaterinburg')`. Значение (unix-время) 1546300800 остается неизменным, но текстовое отображение (результат функции toString()) меняется с `time_utc: 2019-01-01 00:00:00` на `time_yekat: 2019-01-01 05:00:00`.
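+Для описанной выше функции [timeZone](#timezone) пример может выглядеть так. Результат зависит от часового пояса, заданного в конфигурации сервера; значение `Europe/Moscow` приведено здесь лишь для иллюстрации. + +Запрос: + +``` sql +SELECT timeZone(); +``` + +Условный результат: + +``` text +┌─timeZone()────┐ +│ Europe/Moscow │ +└───────────────┘ +```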
+## timeZoneOf {#timezoneof} + +Возвращает название часового пояса для значений типа [DateTime](../../sql-reference/data-types/datetime.md) и [DateTime64](../../sql-reference/data-types/datetime64.md). + +**Синтаксис** + +``` sql +timeZoneOf(value) +``` + +Псевдоним: `timezoneOf`. + +**Аргументы** + +- `value` — дата с временем. [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md). + +**Возвращаемое значение** + +- Название часового пояса. + +Тип: [String](../../sql-reference/data-types/string.md). + +**Пример** + +Запрос: +``` sql +SELECT timezoneOf(now()); +``` + +Результат: +``` text +┌─timezoneOf(now())─┐ +│ Etc/UTC │ +└───────────────────┘ +``` + +## timeZoneOffset {#timezoneoffset} + +Возвращает смещение часового пояса в секундах от [UTC](https://ru.wikipedia.org/wiki/Всемирное_координированное_время). Функция учитывает [летнее время](https://ru.wikipedia.org/wiki/Летнее_время) и исторические изменения часовых поясов, которые действовали на указанную дату. +Для вычисления смещения используется информация из [базы данных IANA](https://www.iana.org/time-zones). + +**Синтаксис** + +``` sql +timeZoneOffset(value) +``` + +Псевдоним: `timezoneOffset`. + +**Аргументы** + +- `value` — дата с временем. [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md). + +**Возвращаемое значение** + +- Смещение в секундах от UTC. + +Тип: [Int32](../../sql-reference/data-types/int-uint.md). + +**Пример** + +Запрос: + +``` sql +SELECT toDateTime('2021-04-21 10:20:30', 'Europe/Moscow') AS Time, toTypeName(Time) AS Type, + timeZoneOffset(Time) AS Offset_in_seconds, (Offset_in_seconds / 3600) AS Offset_in_hours; +``` + +Результат: + +``` text +┌────────────────Time─┬─Type──────────────────────┬─Offset_in_seconds─┬─Offset_in_hours─┐ +│ 2021-04-21 10:20:30 │ DateTime('Europe/Moscow') │ 10800 │ 3 │ +└─────────────────────┴───────────────────────────┴───────────────────┴─────────────────┘ +``` + ## toYear {#toyear} Переводит дату или дату-с-временем в число типа UInt16, содержащее номер года (AD). @@ -943,4 +1060,3 @@ SELECT FROM_UNIXTIME(1234334543, '%Y-%m-%d %R:%S') AS DateTime; │ 2009-02-11 14:42:23 │ └─────────────────────┘ ``` - diff --git a/docs/ru/sql-reference/functions/encoding-functions.md b/docs/ru/sql-reference/functions/encoding-functions.md index f4fa21ba46a..23e840a7898 100644 --- a/docs/ru/sql-reference/functions/encoding-functions.md +++ b/docs/ru/sql-reference/functions/encoding-functions.md @@ -153,8 +153,60 @@ Result: ## unhex(str) {#unhexstr} -Accepts a string containing any number of hexadecimal digits, and returns a string containing the corresponding bytes. Supports both uppercase and lowercase letters A-F. The number of hexadecimal digits does not have to be even. If it is odd, the last digit is interpreted as the least significant half of the 00-0F byte. If the argument string contains anything other than hexadecimal digits, some implementation-defined result is returned (an exception isn’t thrown). -If you want to convert the result to a number, you can use the ‘reverse’ and ‘reinterpretAsType’ functions. +Выполняет операцию, обратную [hex](#hex). Функция интерпретирует каждую пару шестнадцатеричных цифр аргумента как число и преобразует его в символ. Возвращаемое значение представляет собой двоичную строку (BLOB).
Если вы хотите преобразовать результат в число, вы можете использовать функции [reverse](../../sql-reference/functions/string-functions.md#reverse) и [reinterpretAs](../../sql-reference/functions/type-conversion-functions.md#type-conversion-functions). + +!!! note "Примечание" + Если `unhex` вызывается из `clickhouse-client`, двоичные строки отображаются с использованием UTF-8. + +Синоним: `UNHEX`. + +**Синтаксис** + +``` sql +unhex(arg) +``` + +**Аргументы** + +- `arg` — Строка, содержащая любое количество шестнадцатеричных цифр. Тип: [String](../../sql-reference/data-types/string.md). + +Поддерживаются как прописные, так и строчные буквы `A-F`. Количество шестнадцатеричных цифр не обязательно должно быть четным. Если оно нечетное, последняя цифра интерпретируется как младшая половина байта `00-0F`. Если строка аргумента содержит что-либо, кроме шестнадцатеричных цифр, возвращается некоторый результат, определенный реализацией (исключение не выбрасывается). + +**Возвращаемое значение** + +- Бинарная строка (BLOB). + +Тип: [String](../../sql-reference/data-types/string.md). + +**Пример** + +Запрос: +``` sql +SELECT unhex('303132'), UNHEX('4D7953514C'); +``` + +Результат: +``` text +┌─unhex('303132')─┬─unhex('4D7953514C')─┐ +│ 012 │ MySQL │ +└─────────────────┴─────────────────────┘ +``` + +Запрос: + +``` sql +SELECT reinterpretAsUInt64(reverse(unhex('FFF'))) AS num; +``` + +Результат: + +``` text +┌──num─┐ +│ 4095 │ +└──────┘ +``` ## UUIDStringToNum(str) {#uuidstringtonumstr} @@ -171,4 +223,3 @@ If you want to convert the result to a number, you can use the ‘reverse’ and ## bitmaskToArray(num) {#bitmasktoarraynum} Принимает целое число. Возвращает массив чисел типа UInt64, содержащий степени двойки, в сумме дающих исходное число; числа в массиве идут по возрастанию. - diff --git a/docs/ru/sql-reference/functions/encryption-functions.md b/docs/ru/sql-reference/functions/encryption-functions.md index 844f9cc3197..44957fde152 100644 --- a/docs/ru/sql-reference/functions/encryption-functions.md +++ b/docs/ru/sql-reference/functions/encryption-functions.md @@ -5,11 +5,11 @@ toc_title: "Функции для шифрования" # Функции шифрования {#encryption-functions} -Даннвые функции реализуют шифрование и расшифровку данных с помощью AES (Advanced Encryption Standard) алгоритма. +Данные функции реализуют шифрование и расшифровку данных с помощью AES (Advanced Encryption Standard) алгоритма. Длина ключа зависит от режима шифрования. Ключ может быть длиной 16, 24 или 32 байта для режимов шифрования `-128-`, `-192-` и `-256-` соответственно. -Длина инициализирующего вектора всегда 16 байт (лишнии байты игнорируются). +Длина инициализирующего вектора всегда 16 байт (лишние байты игнорируются). Обратите внимание, что до версии Clickhouse 21.1 эти функции работали медленно. diff --git a/docs/ru/sql-reference/functions/ext-dict-functions.md b/docs/ru/sql-reference/functions/ext-dict-functions.md index 919f8ebe276..612477dc806 100644 --- a/docs/ru/sql-reference/functions/ext-dict-functions.md +++ b/docs/ru/sql-reference/functions/ext-dict-functions.md @@ -3,25 +3,29 @@ toc_priority: 58 toc_title: "Функции для работы с внешними словарями" --- +!!! attention "Внимание" + Для словарей, созданных с помощью [DDL-запросов](../../sql-reference/statements/create/dictionary.md), в параметре `dict_name` указывается полное имя словаря вместе с базой данных, например: `<database>.<dict_name>`. Если база данных не указана, используется текущая.
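+Например (имена `db_test` и `dict_test` здесь условные): + +``` sql +-- словарь dict_test создан DDL-запросом в базе db_test (имена условные) +SELECT dictGet('db_test.dict_test', 'attr_name', toUInt64(1)); +```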
+ # Функции для работы с внешними словарями {#ext_dict_functions} Информацию о подключении и настройке внешних словарей смотрите в разделе [Внешние словари](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md). -## dictGet {#dictget} +## dictGet, dictGetOrDefault, dictGetOrNull {#dictget} Извлекает значение из внешнего словаря. ``` sql -dictGet('dict_name', 'attr_name', id_expr) -dictGetOrDefault('dict_name', 'attr_name', id_expr, default_value_expr) +dictGet('dict_name', attr_names, id_expr) +dictGetOrDefault('dict_name', attr_names, id_expr, default_value_expr) +dictGetOrNull('dict_name', attr_name, id_expr) ``` **Аргументы** - `dict_name` — имя словаря. [Строковый литерал](../syntax.md#syntax-string-literal). -- `attr_name` — имя столбца словаря. [Строковый литерал](../syntax.md#syntax-string-literal). -- `id_expr` — значение ключа словаря. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../sql-reference/functions/ext-dict-functions.md) или [Tuple](../../sql-reference/functions/ext-dict-functions.md) в зависимости от конфигурации словаря. -- `default_value_expr` — значение, возвращаемое в том случае, когда словарь не содержит строки с заданным ключом `id_expr`. [Выражение](../syntax.md#syntax-expressions) возвращающее значение с типом данных, сконфигурированным для атрибута `attr_name`. +- `attr_names` — имя столбца словаря, [Строковый литерал](../syntax.md#syntax-string-literal), или кортеж [Tuple](../../sql-reference/data-types/tuple.md) таких имен. +- `id_expr` — значение ключа словаря. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../sql-reference/functions/ext-dict-functions.md) или [Tuple](../../sql-reference/functions/ext-dict-functions.md), в зависимости от конфигурации словаря. +- `default_value_expr` — значение, возвращаемое в том случае, когда словарь не содержит строки с заданным ключом `id_expr`. [Выражение](../syntax.md#syntax-expressions), возвращающее значение с типом данных, сконфигурированным для атрибута `attr_names`, или кортеж [Tuple](../../sql-reference/data-types/tuple.md) таких выражений. **Возвращаемое значение** @@ -31,10 +35,11 @@ dictGetOrDefault('dict_name', 'attr_name', id_expr, default_value_expr) - `dictGet` возвращает содержимое элемента ``, указанного для атрибута в конфигурации словаря. - `dictGetOrDefault` возвращает атрибут `default_value_expr`. + - `dictGetOrNull` возвращает `NULL` в случае, если ключ не найден в словаре. Если значение атрибута не удалось обработать или оно не соответствует типу данных атрибута, то ClickHouse генерирует исключение. -**Пример** +**Пример с единственным атрибутом** Создадим текстовый файл `ext-dict-text.csv` со следующим содержимым: @@ -93,6 +98,130 @@ LIMIT 3 └─────┴────────┘ ``` +**Пример с несколькими атрибутами** + +Создадим текстовый файл `ext-dict-mult.csv` со следующим содержимым: + +``` text +1,1,'1' +2,2,'2' +3,3,'3' +``` + +Первый столбец — `id`, второй столбец — `c1`, третий столбец — `c2`. 
+ +Настройка внешнего словаря: + +``` xml + + + ext-dict-mult + + + /path-to/ext-dict-mult.csv + CSV + + + + + + + + id + + + c1 + UInt32 + + + + c2 + String + + + + 0 + + +``` + +Выполним запрос: + +``` sql +SELECT + dictGet('ext-dict-mult', ('c1','c2'), number) AS val, + toTypeName(val) AS type +FROM system.numbers +LIMIT 3; +``` + +``` text +┌─val─────┬─type──────────────────┐ +│ (1,'1') │ Tuple(UInt8, String) │ +│ (2,'2') │ Tuple(UInt8, String) │ +│ (3,'3') │ Tuple(UInt8, String) │ +└─────────┴───────────────────────┘ +``` + +**Пример для словаря с диапазоном ключей** + +Создадим таблицу: + +```sql +CREATE TABLE range_key_dictionary_source_table +( + key UInt64, + start_date Date, + end_date Date, + value String, + value_nullable Nullable(String) +) +ENGINE = TinyLog(); + +INSERT INTO range_key_dictionary_source_table VALUES(1, toDate('2019-05-20'), toDate('2019-05-20'), 'First', 'First'); +INSERT INTO range_key_dictionary_source_table VALUES(2, toDate('2019-05-20'), toDate('2019-05-20'), 'Second', NULL); +INSERT INTO range_key_dictionary_source_table VALUES(3, toDate('2019-05-20'), toDate('2019-05-20'), 'Third', 'Third'); +``` + +Создадим внешний словарь: + +```sql +CREATE DICTIONARY range_key_dictionary +( + key UInt64, + start_date Date, + end_date Date, + value String, + value_nullable Nullable(String) +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'range_key_dictionary_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(RANGE_HASHED()) +RANGE(MIN start_date MAX end_date); +``` + +Выполним запрос: + +``` sql +SELECT + (number, toDate('2019-05-20')), + dictHas('range_key_dictionary', number, toDate('2019-05-20')), + dictGetOrNull('range_key_dictionary', 'value', number, toDate('2019-05-20')), + dictGetOrNull('range_key_dictionary', 'value_nullable', number, toDate('2019-05-20')), + dictGetOrNull('range_key_dictionary', ('value', 'value_nullable'), number, toDate('2019-05-20')) +FROM system.numbers LIMIT 5 FORMAT TabSeparated; +``` +Результат: + +``` text +(0,'2019-05-20') 0 \N \N (NULL,NULL) +(1,'2019-05-20') 1 First First ('First','First') +(2,'2019-05-20') 0 \N \N (NULL,NULL) +(3,'2019-05-20') 0 \N \N (NULL,NULL) +(4,'2019-05-20') 0 \N \N (NULL,NULL) +``` + **Смотрите также** - [Внешние словари](../../sql-reference/functions/ext-dict-functions.md) @@ -115,7 +244,7 @@ dictHas('dict_name', id) - 0, если ключа нет. - 1, если ключ есть. -Тип — `UInt8`. +Тип: [UInt8](../../sql-reference/data-types/int-uint.md). ## dictGetHierarchy {#dictgethierarchy} @@ -136,7 +265,7 @@ dictGetHierarchy('dict_name', key) - Цепочка предков заданного ключа. -Type: [Array(UInt64)](../../sql-reference/functions/ext-dict-functions.md). +Type: [Array](../../sql-reference/data-types/array.md)([UInt64](../../sql-reference/data-types/int-uint.md)). ## dictIsIn {#dictisin} @@ -155,7 +284,120 @@ Type: [Array(UInt64)](../../sql-reference/functions/ext-dict-functions.md). - 0, если `child_id_expr` — не дочерний элемент `ancestor_id_expr`. - 1, если `child_id_expr` — дочерний элемент `ancestor_id_expr` или если `child_id_expr` и есть `ancestor_id_expr`. -Тип — `UInt8`. +Тип: [UInt8](../../sql-reference/data-types/int-uint.md). + +## dictGetChildren {#dictgetchildren} + +Возвращает потомков первого уровня в виде массива индексов. Это обратное преобразование для [dictGetHierarchy](#dictgethierarchy). + +**Синтаксис** + +``` sql +dictGetChildren(dict_name, key) +``` + +**Аргументы** + +- `dict_name` — имя словаря. [String literal](../../sql-reference/syntax.md#syntax-string-literal). 
+- `key` — значение ключа. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../sql-reference/functions/ext-dict-functions.md). + +**Возвращаемые значения** + +- Потомки первого уровня для ключа. + +Тип: [Array](../../sql-reference/data-types/array.md)([UInt64](../../sql-reference/data-types/int-uint.md)). + +**Пример** + +Рассмотрим иерархический словарь: + +``` text +┌─id─┬─parent_id─┐ +│ 1 │ 0 │ +│ 2 │ 1 │ +│ 3 │ 1 │ +│ 4 │ 2 │ +└────┴───────────┘ +``` + +Потомки первого уровня: + +``` sql +SELECT dictGetChildren('hierarchy_flat_dictionary', number) FROM system.numbers LIMIT 4; +``` + +``` text +┌─dictGetChildren('hierarchy_flat_dictionary', number)─┐ +│ [1] │ +│ [2,3] │ +│ [4] │ +│ [] │ +└──────────────────────────────────────────────────────┘ +``` + +## dictGetDescendants {#dictgetdescendants} + +Возвращает всех потомков, как если бы функция [dictGetChildren](#dictgetchildren) была выполнена `level` раз рекурсивно. + +**Синтаксис** + +``` sql +dictGetDescendants(dict_name, key, level) +``` + +**Аргументы** + +- `dict_name` — имя словаря. [String literal](../../sql-reference/syntax.md#syntax-string-literal). +- `key` — значение ключа. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../sql-reference/functions/ext-dict-functions.md). +- `level` — уровень иерархии. Если `level = 0`, возвращаются все потомки. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Возвращаемые значения** + +- Потомки для ключа. + +Тип: [Array](../../sql-reference/data-types/array.md)([UInt64](../../sql-reference/data-types/int-uint.md)). + +**Пример** + +Рассмотрим иерархический словарь: + +``` text +┌─id─┬─parent_id─┐ +│ 1 │ 0 │ +│ 2 │ 1 │ +│ 3 │ 1 │ +│ 4 │ 2 │ +└────┴───────────┘ +``` +Все потомки: + +``` sql +SELECT dictGetDescendants('hierarchy_flat_dictionary', number) FROM system.numbers LIMIT 4; +``` + +``` text +┌─dictGetDescendants('hierarchy_flat_dictionary', number)─┐ +│ [1,2,3,4] │ +│ [2,3,4] │ +│ [4] │ +│ [] │ +└─────────────────────────────────────────────────────────┘ +``` + +Потомки первого уровня: + +``` sql +SELECT dictGetDescendants('hierarchy_flat_dictionary', number, 1) FROM system.numbers LIMIT 4; +``` + +``` text +┌─dictGetDescendants('hierarchy_flat_dictionary', number, 1)─┐ +│ [1] │ +│ [2,3] │ +│ [4] │ +│ [] │ +└────────────────────────────────────────────────────────────┘ +``` ## Прочие функции {#ext_dict_functions-other} @@ -197,4 +439,3 @@ dictGet[Type]OrDefault('dict_name', 'attr_name', id_expr, default_value_expr) - `dictGet[Type]OrDefault` возвращает аргумент `default_value_expr`. Если значение атрибута не удалось обработать или оно не соответствует типу данных атрибута, то ClickHouse генерирует исключение. - diff --git a/docs/ru/sql-reference/functions/files.md b/docs/ru/sql-reference/functions/files.md new file mode 100644 index 00000000000..9cb659375b9 --- /dev/null +++ b/docs/ru/sql-reference/functions/files.md @@ -0,0 +1,33 @@ +--- +toc_priority: 43 +toc_title: "Функции для работы с файлами" +--- + +# Функции для работы с файлами {#funktsii-dlia-raboty-s-failami} + +## file {#file} + +Читает файл как строку. Содержимое файла не разбирается (не парсится) и записывается в указанную колонку в виде единой строки. + +**Синтаксис** + +``` sql +file(path) +``` + +**Аргументы** + +- `path` — относительный путь до файла от [user_files_path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path).
Путь к файлу может включать следующие символы подстановки и шаблоны: `*`, `?`, `{abc,def}` и `{N..M}`, где `N`, `M` — числа, `'abc', 'def'` — строки. + +**Примеры** + +Вставка данных из файлов a.txt и b.txt в таблицу в виде строк: + +``` sql +INSERT INTO table SELECT file('a.txt'), file('b.txt'); +``` + +**Смотрите также** + +- [user_files_path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path) +- [file](../table-functions/file.md) diff --git a/docs/ru/sql-reference/functions/functions-for-nulls.md b/docs/ru/sql-reference/functions/functions-for-nulls.md index 365dba75da7..7285f803264 100644 --- a/docs/ru/sql-reference/functions/functions-for-nulls.md +++ b/docs/ru/sql-reference/functions/functions-for-nulls.md @@ -224,7 +224,7 @@ assumeNotNull(x) **Возвращаемые значения** - Исходное значение с не `Nullable` типом, если оно — не `NULL`. -- Значение по умолчанию для не `Nullable` типа, если исходное значение — `NULL`. +- Неспецифицированный результат, зависящий от реализации, если исходное значение — `NULL`. **Пример** diff --git a/docs/ru/sql-reference/functions/hash-functions.md b/docs/ru/sql-reference/functions/hash-functions.md index 6797f530346..07c741e0588 100644 --- a/docs/ru/sql-reference/functions/hash-functions.md +++ b/docs/ru/sql-reference/functions/hash-functions.md @@ -7,6 +7,8 @@ toc_title: "Функции хэширования" Функции хэширования могут использоваться для детерминированного псевдослучайного разбрасывания элементов. +Simhash – это хеш-функция, которая для близких значений возвращает близкий хеш. + ## halfMD5 {#hash-functions-halfmd5} [Интерпретирует](../../sql-reference/functions/hash-functions.md#type_conversion_functions-reinterpretAsString) все входные параметры как строки и вычисляет хэш [MD5](https://ru.wikipedia.org/wiki/MD5) для каждой из них. Затем объединяет хэши, берет первые 8 байт хэша результирующей строки и интерпретирует их как значение типа `UInt64` с big-endian порядком байтов. @@ -428,7 +430,7 @@ murmurHash3_128( expr ) **Аргументы** -- `expr` — [выражение](../syntax.md#syntax-expressions), возвращающее значение типа[String](../../sql-reference/functions/hash-functions.md). +- `expr` — [выражение](../syntax.md#syntax-expressions), возвращающее значение типа [String](../../sql-reference/functions/hash-functions.md). **Возвращаемое значение** @@ -437,13 +439,13 @@ murmurHash3_128( expr ) **Пример** ``` sql -SELECT murmurHash3_128('example_string') AS MurmurHash3, toTypeName(MurmurHash3) AS type; +SELECT hex(murmurHash3_128('example_string')) AS MurmurHash3, toTypeName(MurmurHash3) AS type; ``` ``` text -┌─MurmurHash3──────┬─type────────────┐ -│ 6�1�4"S5KT�~~q │ FixedString(16) │ -└──────────────────┴─────────────────┘ +┌─MurmurHash3──────────────────────┬─type───┐ +│ 368A1A311CB7342253354B548E7E7E71 │ String │ +└──────────────────────────────────┴────────┘ ``` ## xxHash32, xxHash64 {#hash-functions-xxhash32-xxhash64} @@ -484,3 +486,937 @@ SELECT xxHash32('Hello, world!'); - [xxHash](http://cyan4973.github.io/xxHash/). +## ngramSimHash {#ngramsimhash} + +Выделяет из ASCII строки отрезки (n-граммы) размером `ngramsize` символов и возвращает n-граммовый `simhash`. Функция регистрозависимая. + +Может быть использована для проверки двух строк на схожесть вместе с функцией [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). 
Чем меньше [расстояние Хэмминга](https://ru.wikipedia.org/wiki/%D0%A0%D0%B0%D1%81%D1%81%D1%82%D0%BE%D1%8F%D0%BD%D0%B8%D0%B5_%D0%A5%D1%8D%D0%BC%D0%BC%D0%B8%D0%BD%D0%B3%D0%B0) между результатом вычисления `simhash` двух строк, тем больше вероятность, что строки совпадают. + +**Синтаксис** + +``` sql +ngramSimHash(string[, ngramsize]) +``` + +**Аргументы** + +- `string` — строка. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — размер n-грамм. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Возвращаемое значение** + +- Значение хеш-функции от строки. + +Тип: [UInt64](../../sql-reference/data-types/int-uint.md). + +**Пример** + +Запрос: + +``` sql +SELECT ngramSimHash('ClickHouse') AS Hash; +``` + +Результат: + +``` text +┌───────Hash─┐ +│ 1627567969 │ +└────────────┘ +``` + +## ngramSimHashCaseInsensitive {#ngramsimhashcaseinsensitive} + +Выделяет из ASCII строки отрезки (n-граммы) размером `ngramsize` символов и возвращает n-граммовый `simhash`. Функция регистро**не**зависимая. + +Может быть использована для проверки двух строк на схожесть вместе с функцией [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). Чем меньше [расстояние Хэмминга](https://ru.wikipedia.org/wiki/%D0%A0%D0%B0%D1%81%D1%81%D1%82%D0%BE%D1%8F%D0%BD%D0%B8%D0%B5_%D0%A5%D1%8D%D0%BC%D0%BC%D0%B8%D0%BD%D0%B3%D0%B0) между результатом вычисления `simhash` двух строк, тем больше вероятность, что строки совпадают. + +**Синтаксис** + +``` sql +ngramSimHashCaseInsensitive(string[, ngramsize]) +``` + +**Аргументы** + +- `string` — строка. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — размер n-грамм. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Возвращаемое значение** + +- Значение хеш-функции от строки. + +Тип: [UInt64](../../sql-reference/data-types/int-uint.md). + +**Пример** + +Запрос: + +``` sql +SELECT ngramSimHashCaseInsensitive('ClickHouse') AS Hash; +``` + +Результат: + +``` text +┌──────Hash─┐ +│ 562180645 │ +└───────────┘ +``` + +## ngramSimHashUTF8 {#ngramsimhashutf8} + +Выделяет из UTF-8 строки отрезки (n-граммы) размером `ngramsize` символов и возвращает n-граммовый `simhash`. Функция регистрозависимая. + +Может быть использована для проверки двух строк на схожесть вместе с функцией [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). Чем меньше [расстояние Хэмминга](https://ru.wikipedia.org/wiki/%D0%A0%D0%B0%D1%81%D1%81%D1%82%D0%BE%D1%8F%D0%BD%D0%B8%D0%B5_%D0%A5%D1%8D%D0%BC%D0%BC%D0%B8%D0%BD%D0%B3%D0%B0) между результатом вычисления `simhash` двух строк, тем больше вероятность, что строки совпадают. + +**Синтаксис** + +``` sql +ngramSimHashUTF8(string[, ngramsize]) +``` + +**Аргументы** + +- `string` — строка. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — размер n-грамм. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Возвращаемое значение** + +- Значение хеш-функции от строки. + +Тип: [UInt64](../../sql-reference/data-types/int-uint.md). 
+ +**Пример** + +Запрос: + +``` sql +SELECT ngramSimHashUTF8('ClickHouse') AS Hash; +``` + +Результат: + +``` text +┌───────Hash─┐ +│ 1628157797 │ +└────────────┘ +``` + +## ngramSimHashCaseInsensitiveUTF8 {#ngramsimhashcaseinsensitiveutf8} + +Выделяет из UTF-8 строки отрезки (n-граммы) размером `ngramsize` символов и возвращает n-граммовый `simhash`. Функция регистро**не**зависимая. + +Может быть использована для проверки двух строк на схожесть вместе с функцией [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). Чем меньше [расстояние Хэмминга](https://ru.wikipedia.org/wiki/%D0%A0%D0%B0%D1%81%D1%81%D1%82%D0%BE%D1%8F%D0%BD%D0%B8%D0%B5_%D0%A5%D1%8D%D0%BC%D0%BC%D0%B8%D0%BD%D0%B3%D0%B0) между результатом вычисления `simhash` двух строк, тем больше вероятность, что строки совпадают. + +**Синтаксис** + +``` sql +ngramSimHashCaseInsensitiveUTF8(string[, ngramsize]) +``` + +**Аргументы** + +- `string` — строка. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — размер n-грамм. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Возвращаемое значение** + +- Значение хеш-функции от строки. + +Тип: [UInt64](../../sql-reference/data-types/int-uint.md). + +**Пример** + +Запрос: + +``` sql +SELECT ngramSimHashCaseInsensitiveUTF8('ClickHouse') AS Hash; +``` + +Результат: + +``` text +┌───────Hash─┐ +│ 1636742693 │ +└────────────┘ +``` + +## wordShingleSimHash {#wordshinglesimhash} + +Выделяет из ASCII строки отрезки (шинглы) из `shinglesize` слов и возвращает шингловый `simhash`. Функция регистрозависимая. + +Может быть использована для проверки двух строк на схожесть вместе с функцией [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). Чем меньше [расстояние Хэмминга](https://ru.wikipedia.org/wiki/%D0%A0%D0%B0%D1%81%D1%81%D1%82%D0%BE%D1%8F%D0%BD%D0%B8%D0%B5_%D0%A5%D1%8D%D0%BC%D0%BC%D0%B8%D0%BD%D0%B3%D0%B0) между результатом вычисления `simhash` двух строк, тем больше вероятность, что строки совпадают. + +**Синтаксис** + +``` sql +wordShingleSimHash(string[, shinglesize]) +``` + +**Аргументы** + +- `string` — строка. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — размер словесных шинглов. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Возвращаемое значение** + +- Значение хеш-функции от строки. + +Тип: [UInt64](../../sql-reference/data-types/int-uint.md). + +**Пример** + +Запрос: + +``` sql +SELECT wordShingleSimHash('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash; +``` + +Результат: + +``` text +┌───────Hash─┐ +│ 2328277067 │ +└────────────┘ +``` + +## wordShingleSimHashCaseInsensitive {#wordshinglesimhashcaseinsensitive} + +Выделяет из ASCII строки отрезки (шинглы) из `shinglesize` слов и возвращает шингловый `simhash`. Функция регистро**не**зависимая. + +Может быть использована для проверки двух строк на схожесть вместе с функцией [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). Чем меньше [расстояние Хэмминга](https://ru.wikipedia.org/wiki/%D0%A0%D0%B0%D1%81%D1%81%D1%82%D0%BE%D1%8F%D0%BD%D0%B8%D0%B5_%D0%A5%D1%8D%D0%BC%D0%BC%D0%B8%D0%BD%D0%B3%D0%B0) между результатом вычисления `simhash` двух строк, тем больше вероятность, что строки совпадают. 
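+
+Например, минимальный набросок такой проверки (строки в примере условные; поскольку функция регистронезависимая, для строк, различающихся только регистром, ожидается расстояние `0`):
+
+``` sql
+SELECT bitHammingDistance(
+    wordShingleSimHashCaseInsensitive('ClickHouse is a column-oriented database management system.'),
+    wordShingleSimHashCaseInsensitive('CLICKHOUSE IS A COLUMN-ORIENTED DATABASE MANAGEMENT SYSTEM.')
+) AS distance;
+```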
+ +**Синтаксис** + +``` sql +wordShingleSimHashCaseInsensitive(string[, shinglesize]) +``` + +**Аргументы** + +- `string` — строка. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — размер словесных шинглов. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Возвращаемое значение** + +- Значение хеш-функции от строки. + +Тип: [UInt64](../../sql-reference/data-types/int-uint.md). + +**Пример** + +Запрос: + +``` sql +SELECT wordShingleSimHashCaseInsensitive('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash; +``` + +Результат: + +``` text +┌───────Hash─┐ +│ 2194812424 │ +└────────────┘ +``` + +## wordShingleSimHashUTF8 {#wordshinglesimhashutf8} + +Выделяет из UTF-8 строки отрезки (шинглы) из `shinglesize` слов и возвращает шингловый `simhash`. Функция регистрозависимая. + +Может быть использована для проверки двух строк на схожесть вместе с функцией [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). Чем меньше [расстояние Хэмминга](https://ru.wikipedia.org/wiki/%D0%A0%D0%B0%D1%81%D1%81%D1%82%D0%BE%D1%8F%D0%BD%D0%B8%D0%B5_%D0%A5%D1%8D%D0%BC%D0%BC%D0%B8%D0%BD%D0%B3%D0%B0) между результатом вычисления `simhash` двух строк, тем больше вероятность, что строки совпадают. + +**Синтаксис** + +``` sql +wordShingleSimHashUTF8(string[, shinglesize]) +``` + +**Аргументы** + +- `string` — строка. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — размер словесных шинглов. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Возвращаемое значение** + +- Значение хеш-функции от строки. + +Тип: [UInt64](../../sql-reference/data-types/int-uint.md). + +**Пример** + +Запрос: + +``` sql +SELECT wordShingleSimHashUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash; +``` + +Результат: + +``` text +┌───────Hash─┐ +│ 2328277067 │ +└────────────┘ +``` + +## wordShingleSimHashCaseInsensitiveUTF8 {#wordshinglesimhashcaseinsensitiveutf8} + +Выделяет из UTF-8 строки отрезки (шинглы) из `shinglesize` слов и возвращает шингловый `simhash`. Функция регистро**не**зависимая. + +Может быть использована для проверки двух строк на схожесть вместе с функцией [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). Чем меньше [расстояние Хэмминга](https://ru.wikipedia.org/wiki/%D0%A0%D0%B0%D1%81%D1%81%D1%82%D0%BE%D1%8F%D0%BD%D0%B8%D0%B5_%D0%A5%D1%8D%D0%BC%D0%BC%D0%B8%D0%BD%D0%B3%D0%B0) между результатом вычисления `simhash` двух строк, тем больше вероятность, что строки совпадают. + +**Синтаксис** + +``` sql +wordShingleSimHashCaseInsensitiveUTF8(string[, shinglesize]) +``` + +**Аргументы** + +- `string` — строка. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — размер словесных шинглов. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Возвращаемое значение** + +- Значение хеш-функции от строки. + +Тип: [UInt64](../../sql-reference/data-types/int-uint.md). 
+ +**Пример** + +Запрос: + +``` sql +SELECT wordShingleSimHashCaseInsensitiveUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash; +``` + +Результат: + +``` text +┌───────Hash─┐ +│ 2194812424 │ +└────────────┘ +``` + +## ngramMinHash {#ngramminhash} + +Выделяет из ASCII строки отрезки (n-граммы) размером `ngramsize` символов и вычисляет хеш для каждой n-граммы. Использует `hashnum` минимальных хешей, чтобы вычислить минимальный хеш, и `hashnum` максимальных хешей, чтобы вычислить максимальный хеш. Возвращает кортеж из этих хешей. Функция регистрозависимая. + +Может быть использована для проверки двух строк на схожесть вместе с функцией [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). Если для двух строк минимальные или максимальные хеши одинаковы, мы считаем, что эти строки совпадают. + +**Синтаксис** + +``` sql +ngramMinHash(string[, ngramsize, hashnum]) +``` + +**Аргументы** + +- `string` — строка. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — размер n-грамм. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — количество минимальных и максимальных хешей, которое используется при вычислении результата. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Возвращаемое значение** + +- Кортеж с двумя хешами — минимальным и максимальным. + +Тип: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)). + +**Пример** + +Запрос: + +``` sql +SELECT ngramMinHash('ClickHouse') AS Tuple; +``` + +Результат: + +``` text +┌─Tuple──────────────────────────────────────┐ +│ (18333312859352735453,9054248444481805918) │ +└────────────────────────────────────────────┘ +``` + +## ngramMinHashCaseInsensitive {#ngramminhashcaseinsensitive} + +Выделяет из ASCII строки отрезки (n-граммы) размером `ngramsize` символов и вычисляет хеш для каждой n-граммы. Использует `hashnum` минимальных хешей, чтобы вычислить минимальный хеш, и `hashnum` максимальных хешей, чтобы вычислить максимальный хеш. Возвращает кортеж из этих хешей. Функция регистро**не**зависимая. + +Может быть использована для проверки двух строк на схожесть вместе с функцией [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). Если для двух строк минимальные или максимальные хеши одинаковы, мы считаем, что эти строки совпадают. + +**Синтаксис** + +``` sql +ngramMinHashCaseInsensitive(string[, ngramsize, hashnum]) +``` + +**Аргументы** + +- `string` — строка. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — размер n-грамм. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — количество минимальных и максимальных хешей, которое используется при вычислении результата. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Возвращаемое значение** + +- Кортеж с двумя хешами — минимальным и максимальным. 
+
+Тип: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)).
+
+**Пример**
+
+Запрос:
+
+``` sql
+SELECT ngramMinHashCaseInsensitive('ClickHouse') AS Tuple;
+```
+
+Результат:
+
+``` text
+┌─Tuple──────────────────────────────────────┐
+│ (2106263556442004574,13203602793651726206) │
+└────────────────────────────────────────────┘
+```
+
+## ngramMinHashUTF8 {#ngramminhashutf8}
+
+Выделяет из UTF-8 строки отрезки (n-граммы) размером `ngramsize` символов и вычисляет хеш для каждой n-граммы. Использует `hashnum` минимальных хешей, чтобы вычислить минимальный хеш, и `hashnum` максимальных хешей, чтобы вычислить максимальный хеш. Возвращает кортеж из этих хешей. Функция регистрозависимая.
+
+Может быть использована для проверки двух строк на схожесть вместе с функцией [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). Если для двух строк минимальные или максимальные хеши одинаковы, мы считаем, что эти строки совпадают.
+
+**Синтаксис**
+
+``` sql
+ngramMinHashUTF8(string[, ngramsize, hashnum])
+```
+
+**Аргументы**
+
+- `string` — строка. [String](../../sql-reference/data-types/string.md).
+- `ngramsize` — размер n-грамм. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
+- `hashnum` — количество минимальных и максимальных хешей, которое используется при вычислении результата. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `6`. [UInt8](../../sql-reference/data-types/int-uint.md).
+
+**Возвращаемое значение**
+
+- Кортеж с двумя хешами — минимальным и максимальным.
+
+Тип: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)).
+
+**Пример**
+
+Запрос:
+
+``` sql
+SELECT ngramMinHashUTF8('ClickHouse') AS Tuple;
+```
+
+Результат:
+
+``` text
+┌─Tuple──────────────────────────────────────┐
+│ (18333312859352735453,6742163577938632877) │
+└────────────────────────────────────────────┘
+```
+
+## ngramMinHashCaseInsensitiveUTF8 {#ngramminhashcaseinsensitiveutf8}
+
+Выделяет из UTF-8 строки отрезки (n-граммы) размером `ngramsize` символов и вычисляет хеш для каждой n-граммы. Использует `hashnum` минимальных хешей, чтобы вычислить минимальный хеш, и `hashnum` максимальных хешей, чтобы вычислить максимальный хеш. Возвращает кортеж из этих хешей. Функция регистро**не**зависимая.
+
+Может быть использована для проверки двух строк на схожесть вместе с функцией [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). Если для двух строк минимальные или максимальные хеши одинаковы, мы считаем, что эти строки совпадают.
+
+**Синтаксис**
+
+``` sql
+ngramMinHashCaseInsensitiveUTF8(string[, ngramsize, hashnum])
+```
+
+**Аргументы**
+
+- `string` — строка. [String](../../sql-reference/data-types/string.md).
+- `ngramsize` — размер n-грамм. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
+- `hashnum` — количество минимальных и максимальных хешей, которое используется при вычислении результата. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `6`. [UInt8](../../sql-reference/data-types/int-uint.md).
+ +**Возвращаемое значение** + +- Кортеж с двумя хешами — минимальным и максимальным. + +Тип: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)). + +**Пример** + +Запрос: + +``` sql +SELECT ngramMinHashCaseInsensitiveUTF8('ClickHouse') AS Tuple; +``` + +Результат: + +``` text +┌─Tuple───────────────────────────────────────┐ +│ (12493625717655877135,13203602793651726206) │ +└─────────────────────────────────────────────┘ +``` + +## ngramMinHashArg {#ngramminhasharg} + +Выделяет из ASCII строки отрезки (n-граммы) размером `ngramsize` символов и возвращает n-граммы с минимальным и максимальным хешами, вычисленными функцией [ngramMinHash](#ngramminhash) с теми же входными данными. Функция регистрозависимая. + +**Синтаксис** + +``` sql +ngramMinHashArg(string[, ngramsize, hashnum]) +``` + +**Аргументы** + +- `string` — строка. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — размер n-грамм. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — количество минимальных и максимальных хешей, которое используется при вычислении результата. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Возвращаемое значение** + +- Кортеж из двух кортежей, каждый из которых состоит из `hashnum` n-грамм. + +Тип: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))). + +**Пример** + +Запрос: + +``` sql +SELECT ngramMinHashArg('ClickHouse') AS Tuple; +``` + +Результат: + +``` text +┌─Tuple─────────────────────────────────────────────────────────────────────────┐ +│ (('ous','ick','lic','Hou','kHo','use'),('Hou','lic','ick','ous','ckH','Cli')) │ +└───────────────────────────────────────────────────────────────────────────────┘ +``` + +## ngramMinHashArgCaseInsensitive {#ngramminhashargcaseinsensitive} + +Выделяет из ASCII строки отрезки (n-граммы) размером `ngramsize` символов и возвращает n-граммы с минимальным и максимальным хешами, вычисленными функцией [ngramMinHashCaseInsensitive](#ngramminhashcaseinsensitive) с теми же входными данными. Функция регистро**не**зависимая. + +**Синтаксис** + +``` sql +ngramMinHashArgCaseInsensitive(string[, ngramsize, hashnum]) +``` + +**Аргументы** + +- `string` — строка. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — размер n-грамм. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — количество минимальных и максимальных хешей, которое используется при вычислении результата. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Возвращаемое значение** + +- Кортеж из двух кортежей, каждый из которых состоит из `hashnum` n-грамм. + +Тип: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))). 
+ +**Пример** + +Запрос: + +``` sql +SELECT ngramMinHashArgCaseInsensitive('ClickHouse') AS Tuple; +``` + +Результат: + +``` text +┌─Tuple─────────────────────────────────────────────────────────────────────────┐ +│ (('ous','ick','lic','kHo','use','Cli'),('kHo','lic','ick','ous','ckH','Hou')) │ +└───────────────────────────────────────────────────────────────────────────────┘ +``` + +## ngramMinHashArgUTF8 {#ngramminhashargutf8} + +Выделяет из UTF-8 строки отрезки (n-граммы) размером `ngramsize` символов и возвращает n-граммы с минимальным и максимальным хешами, вычисленными функцией [ngramMinHashUTF8](#ngramminhashutf8) с теми же входными данными. Функция регистрозависимая. + +**Синтаксис** + +``` sql +ngramMinHashArgUTF8(string[, ngramsize, hashnum]) +``` + +**Аргументы** + +- `string` — строка. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — размер n-грамм. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — количество минимальных и максимальных хешей, которое используется при вычислении результата. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Возвращаемое значение** + +- Кортеж из двух кортежей, каждый из которых состоит из `hashnum` n-грамм. + +Тип: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))). + +**Пример** + +Запрос: + +``` sql +SELECT ngramMinHashArgUTF8('ClickHouse') AS Tuple; +``` + +Результат: + +``` text +┌─Tuple─────────────────────────────────────────────────────────────────────────┐ +│ (('ous','ick','lic','Hou','kHo','use'),('kHo','Hou','lic','ick','ous','ckH')) │ +└───────────────────────────────────────────────────────────────────────────────┘ +``` + +## ngramMinHashArgCaseInsensitiveUTF8 {#ngramminhashargcaseinsensitiveutf8} + +Выделяет из UTF-8 строки отрезки (n-граммы) размером `ngramsize` символов и возвращает n-граммы с минимальным и максимальным хешами, вычисленными функцией [ngramMinHashCaseInsensitiveUTF8](#ngramminhashcaseinsensitiveutf8) с теми же входными данными. Функция регистро**не**зависимая. + +**Синтаксис** + +``` sql +ngramMinHashArgCaseInsensitiveUTF8(string[, ngramsize, hashnum]) +``` + +**Аргументы** + +- `string` — строка. [String](../../sql-reference/data-types/string.md). +- `ngramsize` — размер n-грамм. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — количество минимальных и максимальных хешей, которое используется при вычислении результата. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Возвращаемое значение** + +- Кортеж из двух кортежей, каждый из которых состоит из `hashnum` n-грамм. + +Тип: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))). 
+ +**Пример** + +Запрос: + +``` sql +SELECT ngramMinHashArgCaseInsensitiveUTF8('ClickHouse') AS Tuple; +``` + +Результат: + +``` text +┌─Tuple─────────────────────────────────────────────────────────────────────────┐ +│ (('ckH','ous','ick','lic','kHo','use'),('kHo','lic','ick','ous','ckH','Hou')) │ +└───────────────────────────────────────────────────────────────────────────────┘ +``` + +## wordShingleMinHash {#wordshingleminhash} + +Выделяет из ASCII строки отрезки (шинглы) из `shinglesize` слов и вычисляет хеш для каждого шингла. Использует `hashnum` минимальных хешей, чтобы вычислить минимальный хеш, и `hashnum` максимальных хешей, чтобы вычислить максимальный хеш. Возвращает кортеж из этих хешей. Функция регистрозависимая. + +Может быть использована для проверки двух строк на схожесть вместе с функцией [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). Если для двух строк минимальные или максимальные хеши одинаковы, мы считаем, что эти строки совпадают. + +**Синтаксис** + +``` sql +wordShingleMinHash(string[, shinglesize, hashnum]) +``` + +**Аргументы** + +- `string` — строка. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — размер словесных шинглов. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — количество минимальных и максимальных хешей, которое используется при вычислении результата. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Возвращаемое значение** + +- Кортеж с двумя хешами — минимальным и максимальным. + +Тип: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)). + +**Пример** + +Запрос: + +``` sql +SELECT wordShingleMinHash('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple; +``` + +Результат: + +``` text +┌─Tuple──────────────────────────────────────┐ +│ (16452112859864147620,5844417301642981317) │ +└────────────────────────────────────────────┘ +``` + +## wordShingleMinHashCaseInsensitive {#wordshingleminhashcaseinsensitive} + +Выделяет из ASCII строки отрезки (шинглы) из `shinglesize` слов и вычисляет хеш для каждого шингла. Использует `hashnum` минимальных хешей, чтобы вычислить минимальный хеш, и `hashnum` максимальных хешей, чтобы вычислить максимальный хеш. Возвращает кортеж из этих хешей. Функция регистро**не**зависимая. + +Может быть использована для проверки двух строк на схожесть вместе с функцией [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). Если для двух строк минимальные или максимальные хеши одинаковы, мы считаем, что эти строки совпадают. + +**Синтаксис** + +``` sql +wordShingleMinHashCaseInsensitive(string[, shinglesize, hashnum]) +``` + +**Аргументы** + +- `string` — строка. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — размер словесных шинглов. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — количество минимальных и максимальных хешей, которое используется при вычислении результата. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `6`. 
[UInt8](../../sql-reference/data-types/int-uint.md). + +**Возвращаемое значение** + +- Кортеж с двумя хешами — минимальным и максимальным. + +Тип: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)). + +**Пример** + +Запрос: + +``` sql +SELECT wordShingleMinHashCaseInsensitive('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple; +``` + +Результат: + +``` text +┌─Tuple─────────────────────────────────────┐ +│ (3065874883688416519,1634050779997673240) │ +└───────────────────────────────────────────┘ +``` + +## wordShingleMinHashUTF8 {#wordshingleminhashutf8} + +Выделяет из UTF-8 строки отрезки (шинглы) из `shinglesize` слов и вычисляет хеш для каждого шингла. Использует `hashnum` минимальных хешей, чтобы вычислить минимальный хеш, и `hashnum` максимальных хешей, чтобы вычислить максимальный хеш. Возвращает кортеж из этих хешей. Функция регистрозависимая. + +Может быть использована для проверки двух строк на схожесть вместе с функцией [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). Если для двух строк минимальные или максимальные хеши одинаковы, мы считаем, что эти строки совпадают. + +**Синтаксис** + +``` sql +wordShingleMinHashUTF8(string[, shinglesize, hashnum]) +``` + +**Аргументы** + +- `string` — строка. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — размер словесных шинглов. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — количество минимальных и максимальных хешей, которое используется при вычислении результата. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Возвращаемое значение** + +- Кортеж с двумя хешами — минимальным и максимальным. + +Тип: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)). + +**Пример** + +Запрос: + +``` sql +SELECT wordShingleMinHashUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple; +``` + +Результат: + +``` text +┌─Tuple──────────────────────────────────────┐ +│ (16452112859864147620,5844417301642981317) │ +└────────────────────────────────────────────┘ +``` + +## wordShingleMinHashCaseInsensitiveUTF8 {#wordshingleminhashcaseinsensitiveutf8} + +Выделяет из UTF-8 строки отрезки (шинглы) из `shinglesize` слов и вычисляет хеш для каждого шингла. Использует `hashnum` минимальных хешей, чтобы вычислить минимальный хеш, и `hashnum` максимальных хешей, чтобы вычислить максимальный хеш. Возвращает кортеж из этих хешей. Функция регистро**не**зависимая. + +Может быть использована для проверки двух строк на схожесть вместе с функцией [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). Если для двух строк минимальные или максимальные хеши одинаковы, мы считаем, что эти строки совпадают. + +**Синтаксис** + +``` sql +wordShingleMinHashCaseInsensitiveUTF8(string[, shinglesize, hashnum]) +``` + +**Аргументы** + +- `string` — строка. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — размер словесных шинглов. Необязательный. 
Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
+- `hashnum` — количество минимальных и максимальных хешей, которое используется при вычислении результата. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `6`. [UInt8](../../sql-reference/data-types/int-uint.md).
+
+**Возвращаемое значение**
+
+- Кортеж с двумя хешами — минимальным и максимальным.
+
+Тип: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)).
+
+**Пример**
+
+Запрос:
+
+``` sql
+SELECT wordShingleMinHashCaseInsensitiveUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple;
+```
+
+Результат:
+
+``` text
+┌─Tuple─────────────────────────────────────┐
+│ (3065874883688416519,1634050779997673240) │
+└───────────────────────────────────────────┘
+```
+
+## wordShingleMinHashArg {#wordshingleminhasharg}
+
+Выделяет из ASCII строки отрезки (шинглы) из `shinglesize` слов и возвращает шинглы с минимальным и максимальным хешами, вычисленными функцией [wordShingleMinHash](#wordshingleminhash) с теми же входными данными. Функция регистрозависимая.
+
+**Синтаксис**
+
+``` sql
+wordShingleMinHashArg(string[, shinglesize, hashnum])
+```
+
+**Аргументы**
+
+- `string` — строка. [String](../../sql-reference/data-types/string.md).
+- `shinglesize` — размер словесных шинглов. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
+- `hashnum` — количество минимальных и максимальных хешей, которое используется при вычислении результата. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `6`. [UInt8](../../sql-reference/data-types/int-uint.md).
+
+**Возвращаемое значение**
+
+- Кортеж из двух кортежей, каждый из которых состоит из `hashnum` шинглов.
+
+Тип: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))).
+
+**Пример**
+
+Запрос:
+
+``` sql
+SELECT wordShingleMinHashArg('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple;
+```
+
+Результат:
+
+``` text
+┌─Tuple─────────────────────────────────────────────────────────────────┐
+│ (('OLAP','database','analytical'),('online','oriented','processing')) │
+└───────────────────────────────────────────────────────────────────────┘
+```
+
+## wordShingleMinHashArgCaseInsensitive {#wordshingleminhashargcaseinsensitive}
+
+Выделяет из ASCII строки отрезки (шинглы) из `shinglesize` слов и возвращает шинглы с минимальным и максимальным хешами, вычисленными функцией [wordShingleMinHashCaseInsensitive](#wordshingleminhashcaseinsensitive) с теми же входными данными. Функция регистро**не**зависимая.
+
+**Синтаксис**
+
+``` sql
+wordShingleMinHashArgCaseInsensitive(string[, shinglesize, hashnum])
+```
+
+**Аргументы**
+
+- `string` — строка. [String](../../sql-reference/data-types/string.md).
+- `shinglesize` — размер словесных шинглов. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
+- `hashnum` — количество минимальных и максимальных хешей, которое используется при вычислении результата. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Возвращаемое значение** + +- Кортеж из двух кортежей, каждый из которых состоит из `hashnum` шинглов. + +Тип: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))). + +**Пример** + +Запрос: + +``` sql +SELECT wordShingleMinHashArgCaseInsensitive('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple; +``` + +Результат: + +``` text +┌─Tuple──────────────────────────────────────────────────────────────────┐ +│ (('queries','database','analytical'),('oriented','processing','DBMS')) │ +└────────────────────────────────────────────────────────────────────────┘ +``` + +## wordShingleMinHashArgUTF8 {#wordshingleminhashargutf8} + +Выделяет из UTF-8 строки отрезки (шинглы) из `shinglesize` слов и возвращает шинглы с минимальным и максимальным хешами, вычисленными функцией [wordShingleMinHashUTF8](#wordshingleminhashutf8) с теми же входными данными. Функция регистрозависимая. + +**Синтаксис** + +``` sql +wordShingleMinHashArgUTF8(string[, shinglesize, hashnum]) +``` + +**Аргументы** + +- `string` — строка. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — размер словесных шинглов. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md). +- `hashnum` — количество минимальных и максимальных хешей, которое используется при вычислении результата. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `6`. [UInt8](../../sql-reference/data-types/int-uint.md). + +**Возвращаемое значение** + +- Кортеж из двух кортежей, каждый из которых состоит из `hashnum` шинглов. + +Тип: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))). + +**Пример** + +Запрос: + +``` sql +SELECT wordShingleMinHashArgUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple; +``` + +Результат: + +``` text +┌─Tuple─────────────────────────────────────────────────────────────────┐ +│ (('OLAP','database','analytical'),('online','oriented','processing')) │ +└───────────────────────────────────────────────────────────────────────┘ +``` + +## wordShingleMinHashArgCaseInsensitiveUTF8 {#wordshingleminhashargcaseinsensitiveutf8} + +Выделяет из UTF-8 строки отрезки (шинглы) из `shinglesize` слов и возвращает шинглы с минимальным и максимальным хешами, вычисленными функцией [wordShingleMinHashCaseInsensitiveUTF8](#wordshingleminhashcaseinsensitiveutf8) с теми же входными данными. Функция регистро**не**зависимая. + +**Синтаксис** + +``` sql +wordShingleMinHashArgCaseInsensitiveUTF8(string[, shinglesize, hashnum]) +``` + +**Аргументы** + +- `string` — строка. [String](../../sql-reference/data-types/string.md). +- `shinglesize` — размер словесных шинглов. Необязательный. 
Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
+- `hashnum` — количество минимальных и максимальных хешей, которое используется при вычислении результата. Необязательный. Возможные значения: любое число от `1` до `25`. Значение по умолчанию: `6`. [UInt8](../../sql-reference/data-types/int-uint.md).
+
+**Возвращаемое значение**
+
+- Кортеж из двух кортежей, каждый из которых состоит из `hashnum` шинглов.
+
+Тип: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))).
+
+**Пример**
+
+Запрос:
+
+``` sql
+SELECT wordShingleMinHashArgCaseInsensitiveUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple;
+```
+
+Результат:
+
+``` text
+┌─Tuple──────────────────────────────────────────────────────────────────┐
+│ (('queries','database','analytical'),('oriented','processing','DBMS')) │
+└────────────────────────────────────────────────────────────────────────┘
+```
diff --git a/docs/ru/sql-reference/functions/ip-address-functions.md b/docs/ru/sql-reference/functions/ip-address-functions.md
index 96adad10621..b02d45d7667 100644
--- a/docs/ru/sql-reference/functions/ip-address-functions.md
+++ b/docs/ru/sql-reference/functions/ip-address-functions.md
@@ -395,3 +395,54 @@ SELECT addr, isIPv6String(addr) FROM ( SELECT ['::', '1111::ffff', '::ffff:127.0
 └──────────────────┴────────────────────┘
 ```
 
+## isIPAddressInRange {#isipaddressinrange}
+
+Проверяет, попадает ли IP-адрес в интервал, заданный в нотации [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing).
+
+**Синтаксис**
+
+``` sql
+isIPAddressInRange(address, prefix)
+```
+
+Функция принимает IPv4 или IPv6 адрес в виде строки. Возвращает `0`, если версии адреса и подсети не совпадают.
+
+**Аргументы**
+
+- `address` — IPv4 или IPv6 адрес. [String](../../sql-reference/data-types/string.md).
+- `prefix` — IPv4 или IPv6 подсеть, заданная в нотации CIDR. [String](../../sql-reference/data-types/string.md).
+
+**Возвращаемое значение**
+
+- `1`, если адрес входит в подсеть, иначе `0`.
+
+Тип: [UInt8](../../sql-reference/data-types/int-uint.md).
+
+**Примеры**
+
+Запрос:
+
+``` sql
+SELECT isIPAddressInRange('127.0.0.1', '127.0.0.0/8');
+```
+
+Результат:
+
+``` text
+┌─isIPAddressInRange('127.0.0.1', '127.0.0.0/8')─┐
+│                                              1 │
+└────────────────────────────────────────────────┘
+```
+
+Запрос:
+
+``` sql
+SELECT isIPAddressInRange('127.0.0.1', 'ffff::/16');
+```
+
+Результат:
+
+``` text
+┌─isIPAddressInRange('127.0.0.1', 'ffff::/16')─┐
+│                                            0 │
+└──────────────────────────────────────────────┘
+```
diff --git a/docs/ru/sql-reference/functions/json-functions.md b/docs/ru/sql-reference/functions/json-functions.md
index 5d419d26981..8941ccc1691 100644
--- a/docs/ru/sql-reference/functions/json-functions.md
+++ b/docs/ru/sql-reference/functions/json-functions.md
@@ -16,51 +16,65 @@ toc_title: JSON
 
 ## visitParamHas(params, name) {#visitparamhasparams-name}
 
-Проверить наличие поля с именем name.
+Проверяет наличие поля с именем `name`.
+
+Синоним: `simpleJSONHas`.
 
 ## visitParamExtractUInt(params, name) {#visitparamextractuintparams-name}
 
-Распарсить UInt64 из значения поля с именем name. Если поле строковое - попытаться распарсить число из начала строки.
Если такого поля нет, или если оно есть, но содержит не число, то вернуть 0. +Пытается выделить число типа UInt64 из значения поля с именем `name`. Если поле строковое, пытается выделить число из начала строки. Если такого поля нет, или если оно есть, но содержит не число, то возвращает 0. + +Синоним: `simpleJSONExtractUInt`. ## visitParamExtractInt(params, name) {#visitparamextractintparams-name} Аналогично для Int64. +Синоним: `simpleJSONExtractInt`. + ## visitParamExtractFloat(params, name) {#visitparamextractfloatparams-name} Аналогично для Float64. +Синоним: `simpleJSONExtractFloat`. + ## visitParamExtractBool(params, name) {#visitparamextractboolparams-name} -Распарсить значение true/false. Результат - UInt8. +Пытается выделить значение true/false. Результат — UInt8. + +Синоним: `simpleJSONExtractBool`. ## visitParamExtractRaw(params, name) {#visitparamextractrawparams-name} -Вернуть значение поля, включая разделители. +Возвращает значение поля, включая разделители. + +Синоним: `simpleJSONExtractRaw`. Примеры: ``` sql -visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"' -visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}' +visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"'; +visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}'; ``` ## visitParamExtractString(params, name) {#visitparamextractstringparams-name} -Распарсить строку в двойных кавычках. У значения убирается экранирование. Если убрать экранированные символы не удалось, то возвращается пустая строка. +Разбирает строку в двойных кавычках. У значения убирается экранирование. Если убрать экранированные символы не удалось, то возвращается пустая строка. + +Синоним: `simpleJSONExtractString`. Примеры: ``` sql -visitParamExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0' -visitParamExtractString('{"abc":"\\u263a"}', 'abc') = '☺' -visitParamExtractString('{"abc":"\\u263"}', 'abc') = '' -visitParamExtractString('{"abc":"hello}', 'abc') = '' +visitParamExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0'; +visitParamExtractString('{"abc":"\\u263a"}', 'abc') = '☺'; +visitParamExtractString('{"abc":"\\u263"}', 'abc') = ''; +visitParamExtractString('{"abc":"hello}', 'abc') = ''; ``` -На данный момент, не поддерживаются записанные в формате `\uXXXX\uYYYY` кодовые точки не из basic multilingual plane (они переводятся не в UTF-8, а в CESU-8). +На данный момент не поддерживаются записанные в формате `\uXXXX\uYYYY` кодовые точки не из basic multilingual plane (они переводятся не в UTF-8, а в CESU-8). -Следующие функции используют [simdjson](https://github.com/lemire/simdjson) который разработан под более сложные требования для разбора JSON. Упомянутое выше предположение 2 по-прежнему применимо. +Следующие функции используют [simdjson](https://github.com/lemire/simdjson), который разработан под более сложные требования для разбора JSON. Упомянутое выше допущение 2 по-прежнему применимо. 
 ## isValidJSON(json) {#isvalidjsonjson}
@@ -292,4 +306,3 @@ SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello"
 │ [('d','"hello"'),('f','"world"')] │
 └───────────────────────────────────────────────────────────────────────────────────────────────────────┘
 ```
-
diff --git a/docs/ru/sql-reference/functions/other-functions.md b/docs/ru/sql-reference/functions/other-functions.md
index f9b3e5c3e68..84bbc6af968 100644
--- a/docs/ru/sql-reference/functions/other-functions.md
+++ b/docs/ru/sql-reference/functions/other-functions.md
@@ -1133,6 +1133,111 @@ SELECT defaultValueOfTypeName('Nullable(Int8)')
 └──────────────────────────────────────────┘
 ```
 
+## indexHint {#indexhint}
+
+Возвращает все данные из диапазона, в который попадают данные, соответствующие указанному выражению.
+Переданное выражение не вычисляется: оно используется только для выбора диапазона по индексу.
+Индекс в ClickHouse разреженный, поэтому при чтении диапазона в ответ попадают «лишние» соседние данные.
+
+**Синтаксис**
+
+```sql
+SELECT * FROM table WHERE indexHint(<expression>)
+```
+
+**Возвращаемое значение**
+
+Возвращает все данные из диапазона индекса, в котором выполняется заданное условие.
+
+Тип: [UInt8](../../sql-reference/data-types/int-uint.md).
+
+**Пример**
+
+Рассмотрим пример с использованием тестовых данных таблицы [ontime](../../getting-started/example-datasets/ontime.md).
+
+Исходная таблица:
+
+```sql
+SELECT count() FROM ontime
+```
+
+```text
+┌─count()─┐
+│ 4276457 │
+└─────────┘
+```
+
+В таблице есть индексы по полям `(FlightDate, (Year, FlightDate))`.
+
+Выполним выборку по дате, где индекс не используется.
+
+Запрос:
+
+```sql
+SELECT FlightDate AS k, count() FROM ontime GROUP BY k ORDER BY k
+```
+
+ClickHouse обработал всю таблицу (`Processed 4.28 million rows`).
+
+Результат:
+
+```text
+┌──────────k─┬─count()─┐
+│ 2017-01-01 │   13970 │
+│ 2017-01-02 │   15882 │
+........................
+│ 2017-09-28 │   16411 │
+│ 2017-09-29 │   16384 │
+│ 2017-09-30 │   12520 │
+└────────────┴─────────┘
+```
+
+Чтобы задействовать индекс, выберем конкретную дату.
+
+Запрос:
+
+```sql
+SELECT FlightDate AS k, count() FROM ontime WHERE k = '2017-09-15' GROUP BY k ORDER BY k
+```
+
+При использовании индекса ClickHouse обработал значительно меньшее количество строк (`Processed 32.74 thousand rows`).
+
+Результат:
+
+```text
+┌──────────k─┬─count()─┐
+│ 2017-09-15 │   16428 │
+└────────────┴─────────┘
+```
+
+Передадим в функцию `indexHint` выражение `k = '2017-09-15'`.
+
+Запрос:
+
+```sql
+SELECT
+    FlightDate AS k,
+    count()
+FROM ontime
+WHERE indexHint(k = '2017-09-15')
+GROUP BY k
+ORDER BY k ASC
+```
+
+ClickHouse применил индекс по аналогии с примером выше (`Processed 32.74 thousand rows`).
+Выражение `k = '2017-09-15'` не используется при формировании результата.
+Функция `indexHint` позволяет увидеть соседние данные.
+
+Результат:
+
+```text
+┌──────────k─┬─count()─┐
+│ 2017-09-14 │    7071 │
+│ 2017-09-15 │   16428 │
+│ 2017-09-16 │    1077 │
+│ 2017-09-30 │    8167 │
+└────────────┴─────────┘
+```
+
 ## replicate {#other-functions-replicate}
 
 Создает массив, заполненный одним значением.
diff --git a/docs/ru/sql-reference/functions/splitting-merging-functions.md b/docs/ru/sql-reference/functions/splitting-merging-functions.md index b8d04982b91..5a0c540cf3a 100644 --- a/docs/ru/sql-reference/functions/splitting-merging-functions.md +++ b/docs/ru/sql-reference/functions/splitting-merging-functions.md @@ -14,7 +14,7 @@ separator должен быть константной строкой из ро **Синтаксис** ``` sql -splitByChar(, ) +splitByChar(separator, s) ``` **Аргументы** @@ -30,12 +30,12 @@ splitByChar(, ) - Задано несколько последовательных разделителей; - Исходная строка `s` пуста. -Type: [Array](../../sql-reference/data-types/array.md) of [String](../../sql-reference/data-types/string.md). +Тип: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). **Пример** ``` sql -SELECT splitByChar(',', '1,2,3,abcde') +SELECT splitByChar(',', '1,2,3,abcde'); ``` ``` text @@ -67,12 +67,12 @@ splitByString(separator, s) - Задано несколько последовательных разделителей; - Исходная строка `s` пуста. -Тип: [Array](../../sql-reference/data-types/array.md) of [String](../../sql-reference/data-types/string.md). +Тип: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). **Примеры** ``` sql -SELECT splitByString(', ', '1, 2 3, 4,5, abcde') +SELECT splitByString(', ', '1, 2 3, 4,5, abcde'); ``` ``` text @@ -82,7 +82,7 @@ SELECT splitByString(', ', '1, 2 3, 4,5, abcde') ``` ``` sql -SELECT splitByString('', 'abcde') +SELECT splitByString('', 'abcde'); ``` ``` text @@ -91,6 +91,60 @@ SELECT splitByString('', 'abcde') └────────────────────────────┘ ``` +## splitByRegexp(regexp, s) {#splitbyregexpseparator-s} + +Разбивает строку на подстроки, разделенные регулярным выражением. В качестве разделителя используется строка регулярного выражения `regexp`. Если `regexp` пустая, функция разделит строку `s` на массив одиночных символов. Если для регулярного выражения совпадения не найдено, строка `s` не будет разбита. + +**Синтаксис** + +``` sql +splitByRegexp(regexp, s) +``` + +**Аргументы** + +- `regexp` — регулярное выражение. Константа. [String](../data-types/string.md) или [FixedString](../data-types/fixedstring.md). +- `s` — разбиваемая строка. [String](../../sql-reference/data-types/string.md). + +**Возвращаемые значения** + +Возвращает массив выбранных подстрок. Пустая подстрока может быть возвращена, если: + +- Непустое совпадение с регулярным выражением происходит в начале или конце строки; +- Имеется несколько последовательных совпадений c непустым регулярным выражением; +- Исходная строка `s` пуста, а регулярное выражение не пустое. + +Тип: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). 
+
+**Примеры**
+
+Запрос:
+
+``` sql
+SELECT splitByRegexp('\\d+', 'a12bc23de345f');
+```
+
+Результат:
+
+``` text
+┌─splitByRegexp('\\d+', 'a12bc23de345f')─┐
+│ ['a','bc','de','f']                    │
+└────────────────────────────────────────┘
+```
+
+Запрос:
+
+``` sql
+SELECT splitByRegexp('', 'abcde');
+```
+
+Результат:
+
+``` text
+┌─splitByRegexp('', 'abcde')─┐
+│ ['a','b','c','d','e']      │
+└────────────────────────────┘
+```
+
 ## arrayStringConcat(arr\[, separator\]) {#arraystringconcatarr-separator}
 
@@ -106,7 +160,7 @@ separator - необязательный параметр, константна
 **Пример:**
 
 ``` sql
-SELECT alphaTokens('abca1abc')
+SELECT alphaTokens('abca1abc');
 ```
 
 ``` text
 ┌─alphaTokens('abca1abc')─┐
 │ ['abca','abc']          │
 └─────────────────────────┘
 ```
-
diff --git a/docs/ru/sql-reference/functions/string-functions.md b/docs/ru/sql-reference/functions/string-functions.md
index 6ef7dc01b6a..04af599c09a 100644
--- a/docs/ru/sql-reference/functions/string-functions.md
+++ b/docs/ru/sql-reference/functions/string-functions.md
@@ -645,3 +645,66 @@ SELECT decodeXMLComponent('&lt; &#x3A3; &gt;');
 
 - [Мнемоники в HTML](https://ru.wikipedia.org/wiki/%D0%9C%D0%BD%D0%B5%D0%BC%D0%BE%D0%BD%D0%B8%D0%BA%D0%B8_%D0%B2_HTML)
+
+
+## extractTextFromHTML {#extracttextfromhtml}
+
+Функция для извлечения текста из HTML или XHTML.
+Она не соответствует всем HTML, XML или XHTML стандартам на 100%, но ее реализация достаточно точная и быстрая. Правила обработки следующие:
+
+1. Комментарии удаляются. Пример: `<!-- test -->`. Комментарий должен оканчиваться символами `-->`. Вложенные комментарии недопустимы.
+Примечание: конструкции наподобие `<!-->` и `<!--->` не являются допустимыми комментариями в HTML, но они будут удалены согласно другим правилам.
+2. Содержимое CDATA вставляется дословно. Примечание: формат CDATA специфичен для XML/XHTML. Но он обрабатывается всегда по принципу "наилучшего возможного результата".
+3. Элементы `script` и `style` удаляются вместе со всем содержимым. Примечание: предполагается, что закрывающий тег не может появиться внутри содержимого. Например, в JS строковый литерал должен быть экранирован как `"<\/script>"`.
+Примечание: комментарии и CDATA возможны внутри `script` или `style` - тогда закрывающие теги не ищутся внутри CDATA. Пример: `<script><![CDATA[</script>]]></script>`. Но они ищутся внутри комментариев. Иногда возникают сложные случаи: `<script>var x = "<!--"; </script> var y = "-->"; alert(x + y);</script>`
+Примечание: `script` и `style` могут быть названиями пространств имен XML - тогда они не обрабатываются как обычные элементы `script` или `style`. Пример: `<script:a>Hello</script:a>`.
+Примечание: пробелы возможны после имени закрывающего тега: `</script >`, но не перед ним: `< / script>`.
+4. Другие теги или элементы, подобные тегам, удаляются, а их внутреннее содержимое остается. Пример: `<a>.</a>`
+Примечание: ожидается, что такой HTML является недопустимым: `<a test=">"></a>`
+Примечание: функция также удаляет подобные тегам элементы: `<>`, `<!>`, и т. д.
+Примечание: если встречается тег без завершающего символа `>`, то удаляется этот тег и весь следующий за ним текст: `<hello   `
+5. Мнемоники HTML и XML не декодируются. Они должны быть обработаны отдельной функцией.
+6. Пробелы в тексте удаляются или добавляются по специальным правилам.
+    - Пробелы в начале и в конце извлекаемого текста удаляются.
+    - Последовательные пробелы сворачиваются в один пробел.
+    - Но если текст разделен другими удаляемыми элементами и в этом месте нет пробела, то он добавляется.
+    - Это может привести к появлению неестественного написания, например: `Hello<b>world</b>`, `Hello<!-- -->world` — в HTML нет пробелов, но функция вставляет их. Также следует учитывать такие варианты написания: `Hello<p>world</p>`, `Hello<br>world`. Подобные результаты выполнения функции могут использоваться для анализа данных, например, для преобразования HTML-текста в набор используемых слов.
+7. Также обратите внимание, что правильная обработка пробелов требует поддержки `<pre></pre>` и свойств CSS `display` и `white-space`.
+
+**Синтаксис**
+
+``` sql
+extractTextFromHTML(x)
+```
+
+**Аргументы**
+
+-   `x` — текст для обработки. [String](../../sql-reference/data-types/string.md). 
+
+**Возвращаемое значение**
+
+-   Извлеченный текст.
+
+Тип: [String](../../sql-reference/data-types/string.md).
+
+**Пример**
+
+Первый пример содержит несколько тегов и комментарий. На этом примере также видно, как обрабатываются пробелы.
+Второй пример показывает обработку `CDATA` и тега `script`.
+В третьем примере текст выделяется из полного HTML ответа, полученного с помощью функции [url](../../sql-reference/table-functions/url.md).
+
+Запрос:
+
+``` sql
+SELECT extractTextFromHTML(' <p> A text <i>with</i><b>tags</b>. <!-- comments --> </p> ');
+SELECT extractTextFromHTML('<![CDATA[The content within <b>CDATA</b>]]> <script>alert("Script");</script>');
+SELECT extractTextFromHTML(html) FROM url('http://www.donothingfor2minutes.com/', RawBLOB, 'html String');
+```
+
+Результат:
+
+``` text
+A text with tags .
+The content within <b>CDATA</b>
+Do Nothing for 2 Minutes 2:00 &nbsp;
+```
diff --git a/docs/ru/sql-reference/functions/string-search-functions.md b/docs/ru/sql-reference/functions/string-search-functions.md
index 658b0624b83..2417a1c6ffd 100644
--- a/docs/ru/sql-reference/functions/string-search-functions.md
+++ b/docs/ru/sql-reference/functions/string-search-functions.md
@@ -7,7 +7,7 @@ toc_title: "Функции поиска в строках"
 
 Во всех функциях, поиск регистрозависимый по умолчанию. Существуют варианты функций для регистронезависимого поиска.
 
-## position(haystack, needle) {#position}
+## position(haystack, needle), locate(haystack, needle) {#position}
 
 Поиск подстроки `needle` в строке `haystack`.
 
@@ -21,8 +21,15 @@
 position(haystack, needle[, start_pos])
 ```
 
+``` sql
+position(needle IN haystack)
+```
+
 Алиас: `locate(haystack, needle[, start_pos])`.
 
+!!! note "Примечание"
+    Синтаксис `position(needle IN haystack)` обеспечивает совместимость с SQL, функция работает так же, как `position(haystack, needle)`.
+
 **Аргументы**
 
 - `haystack` — строка, по которой выполняется поиск. [Строка](../syntax.md#syntax-string-literal).
@@ -70,6 +77,36 @@ SELECT position('Привет, мир!', '!');
 └───────────────────────────────┘
 ```
 
+**Примеры работы функции с синтаксисом POSITION(needle IN haystack)**
+
+Запрос:
+
+```sql
+SELECT 1 = position('абв' IN 'абв');
+```
+
+Результат:
+
+```text
+┌─equals(1, position('абв', 'абв'))─┐
+│                                 1 │
+└───────────────────────────────────┘
+```
+
+Запрос:
+
+```sql
+SELECT 0 = position('абв' IN '');
+```
+
+Результат:
+
+```text
+┌─equals(0, position('', 'абв'))─┐
+│                              1 │
+└────────────────────────────────┘
+```
+
 ## positionCaseInsensitive {#positioncaseinsensitive}
 
 Такая же, как и [position](#position), но работает без учета регистра. Возвращает позицию в байтах найденной подстроки в строке, начиная с 1.
 
@@ -758,4 +795,3 @@ SELECT countSubstringsCaseInsensitiveUTF8('аБв__АбВ__абв', 'Абв');
 │                                                          3 │
 └────────────────────────────────────────────────────────────┘
 ```
-
diff --git a/docs/ru/sql-reference/functions/tuple-functions.md b/docs/ru/sql-reference/functions/tuple-functions.md
index a56eac27db2..381743a450b 100644
--- a/docs/ru/sql-reference/functions/tuple-functions.md
+++ b/docs/ru/sql-reference/functions/tuple-functions.md
@@ -111,3 +111,55 @@ SELECT untuple((* EXCEPT (v2, v3),)) FROM kv;
 
 - [Tuple](../../sql-reference/data-types/tuple.md)
 
+## tupleHammingDistance {#tuplehammingdistance}
+
+Возвращает [расстояние Хэмминга](https://ru.wikipedia.org/wiki/%D0%A0%D0%B0%D1%81%D1%81%D1%82%D0%BE%D1%8F%D0%BD%D0%B8%D0%B5_%D0%A5%D1%8D%D0%BC%D0%BC%D0%B8%D0%BD%D0%B3%D0%B0) между двумя кортежами одинакового размера.
+
+**Синтаксис**
+
+``` sql
+tupleHammingDistance(tuple1, tuple2)
+```
+
+**Аргументы**
+
+- `tuple1` — первый кортеж. [Tuple](../../sql-reference/data-types/tuple.md).
+- `tuple2` — второй кортеж. [Tuple](../../sql-reference/data-types/tuple.md).
+
+Кортежи должны иметь одинаковый размер и тип элементов.
+
+**Возвращаемое значение**
+
+- Расстояние Хэмминга.
+
+Тип: [UInt8](../../sql-reference/data-types/int-uint.md).
+ +**Примеры** + +Запрос: + +``` sql +SELECT tupleHammingDistance((1, 2, 3), (3, 2, 1)) AS HammingDistance; +``` + +Результат: + +``` text +┌─HammingDistance─┐ +│ 2 │ +└─────────────────┘ +``` + +Может быть использовано с функциями [MinHash](../../sql-reference/functions/hash-functions.md#ngramminhash) для проверки строк на совпадение: + +``` sql +SELECT tupleHammingDistance(wordShingleMinHash(string), wordShingleMinHashCaseInsensitive(string)) as HammingDistance FROM (SELECT 'Clickhouse is a column-oriented database management system for online analytical processing of queries.' AS string); +``` + +Результат: + +``` text +┌─HammingDistance─┐ +│ 2 │ +└─────────────────┘ +``` diff --git a/docs/ru/sql-reference/functions/type-conversion-functions.md b/docs/ru/sql-reference/functions/type-conversion-functions.md index 1bb7396ae3a..8707642eb59 100644 --- a/docs/ru/sql-reference/functions/type-conversion-functions.md +++ b/docs/ru/sql-reference/functions/type-conversion-functions.md @@ -3,7 +3,7 @@ toc_priority: 38 toc_title: "Функции преобразования типов" --- -# Функции преобразования типов {#funktsii-preobrazovaniia-tipov} +# Функции преобразования типов {#type-conversion-functions} ## Общие проблемы преобразования чисел {#numeric-conversion-issues} @@ -369,7 +369,7 @@ SELECT toFixedString('foo\0bar', 8) AS s, toStringCutToZero(s) AS s_cut; ## reinterpretAsUUID {#reinterpretasuuid} -Функция принимает шестнадцатибайтную строку и интерпретирует ее байты в network order (big-endian). Если строка имеет недостаточную длину, то функция работает так, как будто строка дополнена необходимым количетсвом нулевых байт с конца. Если строка длиннее, чем шестнадцать байт, то игнорируются лишние байты с конца. +Функция принимает строку из 16 байт и интерпретирует ее байты в порядок от старшего к младшему. Если строка имеет недостаточную длину, то функция работает так, как будто строка дополнена необходимым количеством нулевых байтов с конца. Если строка длиннее, чем 16 байтов, то лишние байты с конца игнорируются. **Синтаксис** @@ -423,15 +423,85 @@ SELECT uuid = uuid2; └─────────────────────┘ ``` +## reinterpret(x, T) {#type_conversion_function-reinterpret} + +Использует ту же самую исходную последовательность байтов в памяти для значения `x` и интерпретирует ее как конечный тип данных `T`. + +**Синтаксис** + +``` sql +reinterpret(x, type) +``` + +**Аргументы** + +- `x` — любой тип данных. +- `type` — конечный тип данных. [String](../../sql-reference/data-types/string.md). + +**Возвращаемое значение** + +- Значение конечного типа данных. + +**Примеры** + +Запрос: + +```sql +SELECT reinterpret(toInt8(-1), 'UInt8') as int_to_uint, + reinterpret(toInt8(1), 'Float32') as int_to_float, + reinterpret('1', 'UInt32') as string_to_int; +``` + +Результат: + +``` +┌─int_to_uint─┬─int_to_float─┬─string_to_int─┐ +│ 255 │ 1e-45 │ 49 │ +└─────────────┴──────────────┴───────────────┘ +``` + ## CAST(x, T) {#type_conversion_function-cast} -Преобразует входное значение `x` в указанный тип данных `T`. +Преобразует входное значение `x` в указанный тип данных `T`. В отличии от функции `reinterpret` использует внешнее представление значения `x`. Поддерживается также синтаксис `CAST(x AS t)`. -Обратите внимание, что если значение `x` не может быть преобразовано к типу `T`, возникает переполнение. Например, `CAST(-1, 'UInt8')` возвращает 255. +!!! warning "Предупреждение" + Если значение `x` не может быть преобразовано к типу `T`, возникает переполнение. Например, `CAST(-1, 'UInt8')` возвращает 255. 
-**Пример** +**Синтаксис** + +``` sql +CAST(x, T) +``` + +**Аргументы** + +- `x` — любой тип данных. +- `T` — конечный тип данных. [String](../../sql-reference/data-types/string.md). + +**Возвращаемое значение** + +- Значение конечного типа данных. + +**Примеры** + +Запрос: + +```sql +SELECT + CAST(toInt8(-1), 'UInt8') AS cast_int_to_uint, + CAST(toInt8(1), 'Float32') AS cast_int_to_float, + CAST('1', 'UInt32') AS cast_string_to_int +``` + +Результат: + +``` +┌─cast_int_to_uint─┬─cast_int_to_float─┬─cast_string_to_int─┐ +│ 255 │ 1 │ 1 │ +└──────────────────┴───────────────────┴────────────────────┘ +``` Запрос: @@ -452,9 +522,9 @@ SELECT └─────────────────────┴─────────────────────┴────────────┴─────────────────────┴───────────────────────────┘ ``` -Преобразование в FixedString(N) работает только для аргументов типа String или FixedString(N). +Преобразование в FixedString(N) работает только для аргументов типа [String](../../sql-reference/data-types/string.md) или [FixedString](../../sql-reference/data-types/fixedstring.md). -Поддержано преобразование к типу [Nullable](../../sql-reference/functions/type-conversion-functions.md) и обратно. +Поддерживается преобразование к типу [Nullable](../../sql-reference/functions/type-conversion-functions.md) и обратно. **Примеры** @@ -511,7 +581,8 @@ SELECT cast(-1, 'UInt8') as uint8; ``` text ┌─uint8─┐ │ 255 │ -└───── +└───────┘ +``` Запрос: @@ -627,6 +698,7 @@ SELECT ``` ## parseDateTimeBestEffort {#parsedatetimebesteffort} +## parseDateTime32BestEffort {#parsedatetime32besteffort} Преобразует дату и время в [строковом](../../sql-reference/functions/type-conversion-functions.md) представлении к типу данных [DateTime](../../sql-reference/functions/type-conversion-functions.md#data_type-datetime). @@ -814,6 +886,16 @@ AS parseDateTimeBestEffortUS; └─────────────────────────——┘ ``` +## parseDateTimeBestEffortOrNull {#parsedatetimebesteffortornull} +## parseDateTime32BestEffortOrNull {#parsedatetime32besteffortornull} + +Работает также как [parseDateTimeBestEffort](#parsedatetimebesteffort), но возвращает `NULL` когда получает формат даты который не может быть обработан. + +## parseDateTimeBestEffortOrZero {#parsedatetimebesteffortorzero} +## parseDateTime32BestEffortOrZero {#parsedatetime32besteffortorzero} + +Работает аналогично функции [parseDateTimeBestEffort](#parsedatetimebesteffort), но возвращает нулевое значение, если формат даты не может быть обработан. + ## parseDateTimeBestEffortUSOrNull {#parsedatetimebesteffortusornull} Работает аналогично функции [parseDateTimeBestEffortUS](#parsedatetimebesteffortUS), но в отличие от нее возвращает `NULL`, если входная строка не может быть преобразована в тип данных [DateTime](../../sql-reference/data-types/datetime.md). @@ -986,16 +1068,113 @@ SELECT parseDateTimeBestEffortUSOrZero('02.2021') AS parseDateTimeBestEffortUSOr └─────────────────────────────────┘ ``` -## toUnixTimestamp64Milli -## toUnixTimestamp64Micro -## toUnixTimestamp64Nano +## parseDateTime64BestEffort {#parsedatetime64besteffort} -Преобразует значение `DateTime64` в значение `Int64` с фиксированной точностью менее одной секунды. -Входное значение округляется соответствующим образом вверх или вниз в зависимости от его точности. Обратите внимание, что возвращаемое значение - это временная метка в UTC, а не в часовом поясе `DateTime64`. +Работает аналогично функции [parseDateTimeBestEffort](#parsedatetimebesteffort), но также принимает миллисекунды и микросекунды. 
Возвращает тип данных [DateTime](../../sql-reference/functions/type-conversion-functions.md#data_type-datetime). **Синтаксис** ``` sql +parseDateTime64BestEffort(time_string [, precision [, time_zone]]) +``` + +**Аргументы** + +- `time_string` — строка, содержащая дату или дату со временем, которые нужно преобразовать. [String](../../sql-reference/data-types/string.md). +- `precision` — требуемая точность: `3` — для миллисекунд, `6` — для микросекунд. По умолчанию — `3`. Необязательный. [UInt8](../../sql-reference/data-types/int-uint.md). +- `time_zone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). Разбирает значение `time_string` в зависимости от часового пояса. Необязательный. [String](../../sql-reference/data-types/string.md). + +**Возвращаемое значение** + +- `time_string`, преобразованная в тип данных [DateTime](../../sql-reference/data-types/datetime.md). + +**Примеры** + +Запрос: + +```sql +SELECT parseDateTime64BestEffort('2021-01-01') AS a, toTypeName(a) AS t +UNION ALL +SELECT parseDateTime64BestEffort('2021-01-01 01:01:00.12346') AS a, toTypeName(a) AS t +UNION ALL +SELECT parseDateTime64BestEffort('2021-01-01 01:01:00.12346',6) AS a, toTypeName(a) AS t +UNION ALL +SELECT parseDateTime64BestEffort('2021-01-01 01:01:00.12346',3,'Europe/Moscow') AS a, toTypeName(a) AS t +FORMAT PrettyCompactMonoBlock; +``` + +Результат: + +``` +┌──────────────────────────a─┬─t──────────────────────────────┐ +│ 2021-01-01 01:01:00.123000 │ DateTime64(3) │ +│ 2021-01-01 00:00:00.000000 │ DateTime64(3) │ +│ 2021-01-01 01:01:00.123460 │ DateTime64(6) │ +│ 2020-12-31 22:01:00.123000 │ DateTime64(3, 'Europe/Moscow') │ +└────────────────────────────┴────────────────────────────────┘ +``` + +## parseDateTime64BestEffortOrNull {#parsedatetime32besteffortornull} + +Работает аналогично функции [parseDateTime64BestEffort](#parsedatetime64besteffort), но возвращает `NULL`, если формат даты не может быть обработан. + +## parseDateTime64BestEffortOrZero {#parsedatetime64besteffortorzero} + +Работает аналогично функции [parseDateTime64BestEffort](#parsedatetimebesteffort), но возвращает нулевую дату и время, если формат даты не может быть обработан. + +## toLowCardinality {#tolowcardinality} + +Преобразует входные данные в версию [LowCardianlity](../data-types/lowcardinality.md) того же типа данных. + +Чтобы преобразовать данные из типа `LowCardinality`, используйте функцию [CAST](#type_conversion_function-cast). Например, `CAST(x as String)`. + +**Синтаксис** + +```sql +toLowCardinality(expr) +``` + +**Аргументы** + +- `expr` — [выражение](../syntax.md#syntax-expressions), которое в результате преобразуется в один из [поддерживаемых типов данных](../data-types/index.md#data_types). + +**Возвращаемое значение** + +- Результат преобразования `expr`. + +Тип: `LowCardinality(expr_result_type)` + +**Пример** + +Запрос: + +```sql +SELECT toLowCardinality('1'); +``` + +Результат: + +```text +┌─toLowCardinality('1')─┐ +│ 1 │ +└───────────────────────┘ +``` + +## toUnixTimestamp64Milli {#tounixtimestamp64milli} + +## toUnixTimestamp64Micro {#tounixtimestamp64micro} + +## toUnixTimestamp64Nano {#tounixtimestamp64nano} + +Преобразует значение `DateTime64` в значение `Int64` с фиксированной точностью менее одной секунды. +Входное значение округляется соответствующим образом вверх или вниз в зависимости от его точности. + +!!! info "Примечание" + Возвращаемое значение — это временная метка в UTC, а не в часовом поясе `DateTime64`. 
+ +**Синтаксис** + +```sql toUnixTimestamp64Milli(value) ``` @@ -1011,7 +1190,7 @@ toUnixTimestamp64Milli(value) Запрос: -``` sql +```sql WITH toDateTime64('2019-09-16 19:20:12.345678910', 6) AS dt64 SELECT toUnixTimestamp64Milli(dt64); ``` @@ -1039,9 +1218,11 @@ SELECT toUnixTimestamp64Nano(dt64); └─────────────────────────────┘ ``` -## fromUnixTimestamp64Milli -## fromUnixTimestamp64Micro -## fromUnixTimestamp64Nano +## fromUnixTimestamp64Milli {#fromunixtimestamp64milli} + +## fromUnixTimestamp64Micro {#fromunixtimestamp64micro} + +## fromUnixTimestamp64Nano {#fromunixtimestamp64nano} Преобразует значение `Int64` в значение `DateTime64` с фиксированной точностью менее одной секунды и дополнительным часовым поясом. Входное значение округляется соответствующим образом вверх или вниз в зависимости от его точности. Обратите внимание, что входное значение обрабатывается как метка времени UTC, а не метка времени в заданном (или неявном) часовом поясе. @@ -1077,45 +1258,6 @@ SELECT fromUnixTimestamp64Milli(i64, 'UTC'); └──────────────────────────────────────┘ ``` -## toLowCardinality {#tolowcardinality} - -Преобразует входные данные в версию [LowCardianlity](../data-types/lowcardinality.md) того же типа данных. - -Чтобы преобразовать данные из типа `LowCardinality`, используйте функцию [CAST](#type_conversion_function-cast). Например, `CAST(x as String)`. - -**Синтаксис** - -```sql -toLowCardinality(expr) -``` - -**Аргументы** - -- `expr` — [выражение](../syntax.md#syntax-expressions), которое в результате преобразуется в один из [поддерживаемых типов данных](../data-types/index.md#data_types). - - -**Возвращаемое значение** - -- Результат преобразования `expr`. - -Тип: `LowCardinality(expr_result_type)` - -**Пример** - -Запрос: - -```sql -SELECT toLowCardinality('1'); -``` - -Результат: - -```text -┌─toLowCardinality('1')─┐ -│ 1 │ -└───────────────────────┘ -``` - ## formatRow {#formatrow} Преобразует произвольные выражения в строку заданного формата. @@ -1194,4 +1336,3 @@ FROM numbers(3); │ 2,"good" │ └───────────────────────────────────────────┘ ``` - diff --git a/docs/ru/sql-reference/statements/alter/column.md b/docs/ru/sql-reference/statements/alter/column.md index f51859b46f6..158ab2e7385 100644 --- a/docs/ru/sql-reference/statements/alter/column.md +++ b/docs/ru/sql-reference/statements/alter/column.md @@ -63,6 +63,9 @@ DROP COLUMN [IF EXISTS] name Запрос удаляет данные из файловой системы. Так как это представляет собой удаление целых файлов, запрос выполняется почти мгновенно. +!!! warning "Предупреждение" + Вы не можете удалить столбец, используемый в [материализованном представлениии](../../../sql-reference/statements/create/view.md#materialized). В противном случае будет ошибка. + Пример: ``` sql @@ -117,7 +120,7 @@ MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [TTL] [AFTER name_after | F - TTL - Примеры изменения TTL столбца смотрите в разделе [TTL столбца](ttl.md#mergetree-column-ttl). + Примеры изменения TTL столбца смотрите в разделе [TTL столбца](../../../engines/table-engines/mergetree-family/mergetree.md#mergetree-column-ttl). Если указано `IF EXISTS`, запрос не возвращает ошибку, если столбца не существует. @@ -155,7 +158,7 @@ ALTER TABLE table_name MODIFY column_name REMOVE property; ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL; ``` -## Смотрите также +**Смотрите также** - [REMOVE TTL](ttl.md). 
diff --git a/docs/ru/sql-reference/statements/alter/index/index.md b/docs/ru/sql-reference/statements/alter/index/index.md
index 862def5cc04..632f11ed906 100644
--- a/docs/ru/sql-reference/statements/alter/index/index.md
+++ b/docs/ru/sql-reference/statements/alter/index/index.md
@@ -9,8 +9,9 @@ toc_title: "Манипуляции с индексами"
Добавить или удалить индекс можно с помощью операций

``` sql
-ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value [AFTER name]
-ALTER TABLE [db].name DROP INDEX name
+ALTER TABLE [db.]name ADD INDEX name expression TYPE type GRANULARITY value [AFTER name]
+ALTER TABLE [db.]name DROP INDEX name
+ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name
```

Поддерживается только таблицами семейства `*MergeTree`.

@@ -18,6 +19,7 @@ ALTER TABLE [db].name DROP INDEX name
Команда `ADD INDEX` добавляет описание индексов в метаданные, а `DROP INDEX` удаляет индекс из метаданных и стирает файлы индекса с диска, поэтому они легковесные и работают мгновенно.

Если индекс появился в метаданных, то он начнет считаться в последующих слияниях и записях в таблицу, а не сразу после выполнения операции `ALTER`.
+`MATERIALIZE INDEX` перестраивает индекс в указанной партиции. Реализовано как мутация.

Запрос на изменение индексов реплицируется, сохраняя новые метаданные в ZooKeeper и применяя изменения на всех репликах.

diff --git a/docs/ru/sql-reference/statements/alter/partition.md b/docs/ru/sql-reference/statements/alter/partition.md
index 3e7b069b066..02a87406e86 100644
--- a/docs/ru/sql-reference/statements/alter/partition.md
+++ b/docs/ru/sql-reference/statements/alter/partition.md
@@ -38,7 +38,7 @@ ALTER TABLE mt DETACH PART 'all_2_2_0';

После того как запрос будет выполнен, вы сможете производить любые операции с данными в директории `detached`. Например, можно удалить их из файловой системы.

-Запрос реплицируется — данные будут перенесены в директорию `detached` и забыты на всех репликах. Обратите внимание, запрос может быть отправлен только на реплику-лидер. Чтобы узнать, является ли реплика лидером, выполните запрос `SELECT` к системной таблице [system.replicas](../../../operations/system-tables/replicas.md#system_tables-replicas). Либо можно выполнить запрос `DETACH` на всех репликах — тогда на всех репликах, кроме реплики-лидера, запрос вернет ошибку.
+Запрос реплицируется — данные будут перенесены в директорию `detached` и забыты на всех репликах. Обратите внимание, запрос может быть отправлен только на реплику-лидер. Чтобы узнать, является ли реплика лидером, выполните запрос `SELECT` к системной таблице [system.replicas](../../../operations/system-tables/replicas.md#system_tables-replicas). Либо можно выполнить запрос `DETACH` на всех репликах — тогда на всех репликах, кроме реплик-лидеров (поскольку допускается несколько лидеров), запрос вернет ошибку.

## DROP PARTITION\|PART {#alter_drop-partition}

@@ -83,9 +83,13 @@ ALTER TABLE visits ATTACH PART 201901_2_2_0;

Как корректно задать имя партиции или куска, см. в разделе [Как задавать имя партиции в запросах ALTER](#alter-how-to-specify-part-expr).

-Этот запрос реплицируется. Реплика-иницатор проверяет, есть ли данные в директории `detached`. Если данные есть, то запрос проверяет их целостность. В случае успеха данные добавляются в таблицу. Все остальные реплики загружают данные с реплики-инициатора запроса.
+Этот запрос реплицируется. Реплика-инициатор проверяет, есть ли данные в директории `detached`.
+Если данные есть, то запрос проверяет их целостность.
В случае успеха данные добавляются в таблицу. -Это означает, что вы можете разместить данные в директории `detached` на одной реплике и с помощью запроса `ALTER ... ATTACH` добавить их в таблицу на всех репликах. +Если реплика, не являющаяся инициатором запроса, получив команду присоединения, находит кусок с правильными контрольными суммами в своей собственной папке `detached`, она присоединяет данные, не скачивая их с других реплик. +Если нет куска с правильными контрольными суммами, данные загружаются из любой реплики, имеющей этот кусок. + +Вы можете поместить данные в директорию `detached` на одной реплике и с помощью запроса `ALTER ... ATTACH` добавить их в таблицу на всех репликах. ## ATTACH PARTITION FROM {#alter_attach-partition-from} @@ -93,7 +97,8 @@ ALTER TABLE visits ATTACH PART 201901_2_2_0; ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1 ``` -Копирует партицию из таблицы `table1` в таблицу `table2` и добавляет к существующим данным `table2`. Данные из `table1` не удаляются. +Копирует партицию из таблицы `table1` в таблицу `table2`. +Обратите внимание, что данные не удаляются ни из `table1`, ни из `table2`. Следует иметь в виду: @@ -305,4 +310,3 @@ OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL; `IN PARTITION` указывает на партицию, для которой применяются выражения [UPDATE](../../../sql-reference/statements/alter/update.md#alter-table-update-statements) или [DELETE](../../../sql-reference/statements/alter/delete.md#alter-mutations) в результате запроса `ALTER TABLE`. Новые куски создаются только в указанной партиции. Таким образом, `IN PARTITION` помогает снизить нагрузку, когда таблица разбита на множество партиций, а вам нужно обновить данные лишь точечно. Примеры запросов `ALTER ... PARTITION` можно посмотреть в тестах: [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) и [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql). - diff --git a/docs/ru/sql-reference/statements/alter/ttl.md b/docs/ru/sql-reference/statements/alter/ttl.md index e949c992bbe..2a2d10b69de 100644 --- a/docs/ru/sql-reference/statements/alter/ttl.md +++ b/docs/ru/sql-reference/statements/alter/ttl.md @@ -82,4 +82,4 @@ SELECT * FROM table_with_ttl; ### Смотрите также - Подробнее о [свойстве TTL](../../../engines/table-engines/mergetree-family/mergetree.md#mergetree-column-ttl). - +- Изменить столбец [с TTL](../../../sql-reference/statements/alter/column.md#alter_modify-column). \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/alter/user.md b/docs/ru/sql-reference/statements/alter/user.md index 604eff9de15..bb57c3bb328 100644 --- a/docs/ru/sql-reference/statements/alter/user.md +++ b/docs/ru/sql-reference/statements/alter/user.md @@ -12,20 +12,32 @@ toc_title: USER ``` sql ALTER USER [IF EXISTS] name1 [ON CLUSTER cluster_name1] [RENAME TO new_name1] [, name2 [ON CLUSTER cluster_name2] [RENAME TO new_name2] ...] - [IDENTIFIED [WITH {PLAINTEXT_PASSWORD|SHA256_PASSWORD|DOUBLE_SHA1_PASSWORD}] BY {'password'|'hash'}] - [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] 
| ANY | NONE] + [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']}] + [[ADD | DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ] - [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] + [GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]] + [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY | WRITABLE] | PROFILE 'profile_name'] [,...] ``` Для выполнения `ALTER USER` необходима привилегия [ALTER USER](../grant.md#grant-access-management). +## Секция GRANTEES {#grantees} + +Определяет пользователей или роли, которым разрешено получать [привилегии](../../../sql-reference/statements/grant.md#grant-privileges) от указанного пользователя при условии, что этому пользователю также предоставлен весь необходимый доступ с использованием [GRANT OPTION](../../../sql-reference/statements/grant.md#grant-privigele-syntax). Параметры секции `GRANTEES`: + +- `user` — пользователь, которому разрешено получать привилегии от указанного пользователя. +- `role` — роль, которой разрешено получать привилегии от указанного пользователя. +- `ANY` — любому пользователю или любой роли разрешено получать привилегии от указанного пользователя. Используется по умолчанию. +- `NONE` — никому не разрешено получать привилегии от указанного пользователя. + +Вы можете исключить любого пользователя или роль, используя выражение `EXCEPT`. Например, `ALTER USER user1 GRANTEES ANY EXCEPT user2`. Это означает, что если `user1` имеет привилегии, предоставленные с использованием `GRANT OPTION`, он сможет предоставить их любому, кроме `user2`. + ## Примеры {#alter-user-examples} Установить ролями по умолчанию роли, назначенные пользователю: ``` sql -ALTER USER user DEFAULT ROLE role1, role2 +ALTER USER user DEFAULT ROLE role1, role2; ``` Если роли не были назначены пользователю, ClickHouse выбрасывает исключение. @@ -33,7 +45,7 @@ ALTER USER user DEFAULT ROLE role1, role2 Установить ролями по умолчанию все роли, назначенные пользователю: ``` sql -ALTER USER user DEFAULT ROLE ALL +ALTER USER user DEFAULT ROLE ALL; ``` Если роль будет впоследствии назначена пользователю, она автоматически станет ролью по умолчанию. @@ -41,6 +53,11 @@ ALTER USER user DEFAULT ROLE ALL Установить ролями по умолчанию все назначенные пользователю роли кроме `role1` и `role2`: ``` sql -ALTER USER user DEFAULT ROLE ALL EXCEPT role1, role2 +ALTER USER user DEFAULT ROLE ALL EXCEPT role1, role2; ``` +Разрешить пользователю с аккаунтом `john` предоставить свои привилегии пользователю с аккаунтом `jack`: + +``` sql +ALTER USER john GRANTEES jack; +``` diff --git a/docs/ru/sql-reference/statements/attach.md b/docs/ru/sql-reference/statements/attach.md index 18ec47d05c8..b135507b818 100644 --- a/docs/ru/sql-reference/statements/attach.md +++ b/docs/ru/sql-reference/statements/attach.md @@ -5,12 +5,14 @@ toc_title: ATTACH # ATTACH Statement {#attach} -Запрос полностью аналогичен запросу `CREATE`, но: +Выполняет подключение таблицы, например, при перемещении базы данных на другой сервер. 
-
-- вместо слова `CREATE` используется слово `ATTACH`;
-- запрос не создаёт данные на диске, а предполагает, что данные уже лежат в соответствующих местах, и всего лишь добавляет информацию о таблице на сервер. После выполнения запроса `ATTACH` сервер будет знать о существовании таблицы.
+Запрос не создаёт данные на диске, а предполагает, что данные уже лежат в соответствующих местах, и всего лишь добавляет информацию о таблице на сервер. После выполнения запроса `ATTACH` сервер будет знать о существовании таблицы.

-Если таблица перед этим была отключена ([DETACH](../../sql-reference/statements/detach.md)), т.е. её структура известна, можно использовать сокращенную форму записи без определения структуры.
+Если таблица перед этим была отключена при помощи [DETACH](../../sql-reference/statements/detach.md), т.е. её структура известна, можно использовать сокращенную форму записи без определения структуры.
+
+## Варианты синтаксиса {#syntax-forms}
+
+### Присоединение существующей таблицы {#attach-existing-table}

``` sql
ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster]
@@ -20,4 +22,38 @@ ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster]

Если таблица была отключена перманентно, она не будет подключена обратно во время старта сервера, так что нужно явно использовать запрос `ATTACH`, чтобы подключить ее.

+### Создание новой таблицы и присоединение данных {#create-new-table-and-attach-data}
+
+**С указанием пути к табличным данным**
+
+```sql
+ATTACH TABLE name FROM 'path/to/data/' (col1 Type1, ...)
+```
+
+Создает новую таблицу с указанной структурой и присоединяет табличные данные из соответствующего каталога в `user_files`.
+
+**Пример**
+
+Запрос:
+
+```sql
+DROP TABLE IF EXISTS test;
+INSERT INTO TABLE FUNCTION file('01188_attach/test/data.TSV', 'TSV', 's String, n UInt8') VALUES ('test', 42);
+ATTACH TABLE test FROM '01188_attach/test' (s String, n UInt8) ENGINE = File(TSV);
+SELECT * FROM test;
+```
+
+Результат:
+
+```text
+┌─s────┬──n─┐
+│ test │ 42 │
+└──────┴────┘
+```
+
+**С указанием UUID таблицы** (только для баз данных `Atomic`)
+
+```sql
+ATTACH TABLE name UUID '<uuid>' (col1 Type1, ...)
+```
+
+Создает новую таблицу с указанной структурой и присоединяет данные из таблицы с указанным UUID.
diff --git a/docs/ru/sql-reference/statements/check-table.md b/docs/ru/sql-reference/statements/check-table.md
index 10336f821d0..9592c1a5bc2 100644
--- a/docs/ru/sql-reference/statements/check-table.md
+++ b/docs/ru/sql-reference/statements/check-table.md
@@ -29,9 +29,36 @@ CHECK TABLE [db.]name

В движках `*Log` не предусмотрено автоматическое восстановление данных после сбоя. Используйте запрос `CHECK TABLE`, чтобы своевременно выявлять повреждение данных.

-Для движков из семейства `MergeTree` запрос `CHECK TABLE` показывает статус проверки для каждого отдельного куска данных таблицы на локальном сервере.
+## Проверка таблиц семейства MergeTree {#checking-mergetree-tables}

-**Что делать, если данные повреждены**
+Для таблиц семейства `MergeTree`, если [check_query_single_value_result](../../operations/settings/settings.md#check_query_single_value_result) = 0, запрос `CHECK TABLE` возвращает статус каждого куска данных таблицы на локальном сервере.
+
+```sql
+SET check_query_single_value_result = 0;
+CHECK TABLE test_table;
+```
+
+```text
+┌─part_path─┬─is_passed─┬─message─┐
+│ all_1_4_1 │         1 │         │
+│ all_1_4_2 │         1 │         │
+└───────────┴───────────┴─────────┘
+```
+
+Если `check_query_single_value_result` = 1, запрос `CHECK TABLE` возвращает статус таблицы в целом.
+
+```sql
+SET check_query_single_value_result = 1;
+CHECK TABLE test_table;
+```
+
+```text
+┌─result─┐
+│      1 │
+└────────┘
+```
+
+## Что делать, если данные повреждены {#if-data-is-corrupted}

В этом случае можно скопировать оставшиеся неповрежденные данные в другую таблицу. Для этого:

diff --git a/docs/ru/sql-reference/statements/create/row-policy.md b/docs/ru/sql-reference/statements/create/row-policy.md
index 88709598906..6fe1dc45815 100644
--- a/docs/ru/sql-reference/statements/create/row-policy.md
+++ b/docs/ru/sql-reference/statements/create/row-policy.md
@@ -5,7 +5,7 @@ toc_title: "Политика доступа"

# CREATE ROW POLICY {#create-row-policy-statement}

-Создает [фильтры для строк](../../../operations/access-rights.md#row-policy-management), которые пользователь может прочесть из таблицы.
+Создает [политики доступа к строкам](../../../operations/access-rights.md#row-policy-management), т.е. фильтры, которые определяют, какие строки пользователь может читать из таблицы.

Синтаксис:

@@ -13,33 +13,74 @@
CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluster_name1] ON [db1.]table1
    [, policy_name2 [ON CLUSTER cluster_name2] ON [db2.]table2 ...]
    [AS {PERMISSIVE | RESTRICTIVE}]
-    [FOR SELECT]
-    [USING condition]
+    [FOR SELECT] USING condition
    [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
```

-Секция `ON CLUSTER` позволяет создавать фильтры для строк на кластере, см. [Распределенные DDL запросы](../../../sql-reference/distributed-ddl.md).
+## Секция USING {#create-row-policy-using}

-## Секция AS {#create-row-policy-as}
-
-С помощью данной секции можно создать политику разрешения или ограничения.
-
-Политика разрешения предоставляет доступ к строкам. Разрешительные политики, которые применяются к одной таблице, объединяются с помощью логического оператора `OR`. Политики являются разрешительными по умолчанию.
-
-Политика ограничения запрещает доступ к строкам. Ограничительные политики, которые применяются к одной таблице, объединяются логическим оператором `AND`.
-
-Ограничительные политики применяются к строкам, прошедшим фильтр разрешительной политики. Если вы не зададите разрешительные политики, пользователь не сможет обращаться ни к каким строкам из таблицы.
+Секция `USING` указывает условие для фильтрации строк. Пользователь может видеть строку, если это условие, вычисленное для строки, дает ненулевой результат.

## Секция TO {#create-row-policy-to}

-В секции `TO` вы можете перечислить как роли, так и пользователей. Например, `CREATE ROW POLICY ... TO accountant, john@localhost`.
+В секции `TO` перечисляются пользователи и роли, для которых должна действовать политика. Например, `CREATE ROW POLICY ... TO accountant, john@localhost`.

Ключевым словом `ALL` обозначаются все пользователи, включая текущего. Ключевые слова `ALL EXCEPT` позволяют исключить пользователей из списка всех пользователей. Например, `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost`

+!!! note "Примечание"
+    Если для таблицы не задано ни одной политики доступа к строкам, то любой пользователь может выполнить команду `SELECT` и получить все строки таблицы. Если определить хотя бы одну политику для таблицы, то доступ к строкам будет управляться этими политиками, причем для всех пользователей (даже для тех, для кого политики не определялись).
Например, следующая политика + + `CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter` + + запретит пользователям `mira` и `peter` видеть строки с `b != 1`, и еще запретит всем остальным пользователям (например, пользователю `paul`) видеть какие-либо строки вообще из таблицы `mydb.table1`. + + Если это нежелательно, такое поведение можно исправить, определив дополнительную политику: + + `CREATE ROW POLICY pol2 ON mydb.table1 USING 1 TO ALL EXCEPT mira, peter` + +## Секция AS {#create-row-policy-as} + +Может быть одновременно активно более одной политики для одной и той же таблицы и одного и того же пользователя. Поэтому нам нужен способ комбинировать политики. + +По умолчанию политики комбинируются с использованием логического оператора `OR`. Например, политики: + +``` sql +CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter +CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 TO peter, antonio +``` + +разрешат пользователю с именем `peter` видеть строки, для которых будет верно `b=1` или `c=2`. + +Секция `AS` указывает, как политики должны комбинироваться с другими политиками. Политики могут быть или разрешительными (`PERMISSIVE`), или ограничительными (`RESTRICTIVE`). По умолчанию политики создаются разрешительными (`PERMISSIVE`); такие политики комбинируются с использованием логического оператора `OR`. + +Ограничительные (`RESTRICTIVE`) политики комбинируются с использованием логического оператора `AND`. + +Общая формула выглядит так: + +``` +строка_видима = (одна или больше permissive-политик дала ненулевой результат проверки условия) И + (все restrictive-политики дали ненулевой результат проверки условия) +``` + +Например, политики + +``` sql +CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter +CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 AS RESTRICTIVE TO peter, antonio +``` + +разрешат пользователю с именем `peter` видеть только те строки, для которых будет одновременно `b=1` и `c=2`. + +## Секция ON CLUSTER {#create-row-policy-on-cluster} + +Секция `ON CLUSTER` позволяет создавать политики на кластере, см. [Распределенные DDL запросы](../../../sql-reference/distributed-ddl.md). + ## Примеры -`CREATE ROW POLICY filter ON mydb.mytable FOR SELECT USING a<1000 TO accountant, john@localhost` +`CREATE ROW POLICY filter1 ON mydb.mytable USING a<1000 TO accountant, john@localhost` -`CREATE ROW POLICY filter ON mydb.mytable FOR SELECT USING a<1000 TO ALL EXCEPT mira` +`CREATE ROW POLICY filter2 ON mydb.mytable USING a<1000 AND b=5 TO ALL EXCEPT mira` + +`CREATE ROW POLICY filter3 ON mydb.mytable USING 1 TO admin` \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/create/table.md b/docs/ru/sql-reference/statements/create/table.md index b998435bcd8..1d65d82b24c 100644 --- a/docs/ru/sql-reference/statements/create/table.md +++ b/docs/ru/sql-reference/statements/create/table.md @@ -46,15 +46,32 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name AS table_function() ### Из запроса SELECT {#from-select-query} ``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name ENGINE = engine AS SELECT ... +CREATE TABLE [IF NOT EXISTS] [db.]table_name[(name1 [type1], name2 [type2], ...)] ENGINE = engine AS SELECT ... ``` -Создаёт таблицу со структурой, как результат запроса `SELECT`, с движком engine, и заполняет её данными из SELECT-а. +Создаёт таблицу со структурой, как результат запроса `SELECT`, с движком `engine`, и заполняет её данными из `SELECT`. Также вы можете явно задать описание столбцов. 
-Во всех случаях, если указано `IF NOT EXISTS`, то запрос не будет возвращать ошибку, если таблица уже существует. В этом случае, запрос будет ничего не делать. +Если таблица уже существует и указано `IF NOT EXISTS`, то запрос ничего не делает. После секции `ENGINE` в запросе могут использоваться и другие секции в зависимости от движка. Подробную документацию по созданию таблиц смотрите в описаниях [движков таблиц](../../../engines/table-engines/index.md#table_engines). +**Пример** + +Запрос: + +``` sql +CREATE TABLE t1 (x String) ENGINE = Memory AS SELECT 1; +SELECT x, toTypeName(x) FROM t1; +``` + +Результат: + +```text +┌─x─┬─toTypeName(x)─┐ +│ 1 │ String │ +└───┴───────────────┘ +``` + ## Модификатор NULL или NOT NULL {#null-modifiers} Модификатор `NULL` или `NOT NULL`, указанный после типа данных в определении столбца, позволяет или не позволяет типу данных быть [Nullable](../../../sql-reference/data-types/nullable.md#data_type-nullable). @@ -230,7 +247,7 @@ CREATE TABLE codec_example ) ENGINE = MergeTree() ``` -## Временные таблицы {#vremennye-tablitsy} +## Временные таблицы {#temporary-tables} ClickHouse поддерживает временные таблицы со следующими характеристиками: @@ -329,4 +346,39 @@ SELECT * FROM base.t1; └───┘ ``` +## Секция COMMENT {#comment-table} + +Вы можете добавить комментарий к таблице при ее создании. + +!!!note "Замечание" + Комментарий поддерживается для всех движков таблиц, кроме [Kafka](../../../engines/table-engines/integrations/kafka.md), [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md) и [EmbeddedRocksDB](../../../engines/table-engines/integrations/embedded-rocksdb.md). + +**Синтаксис** + +``` sql +CREATE TABLE db.table_name +( + name1 type1, name2 type2, ... +) +ENGINE = engine +COMMENT 'Comment' +``` + +**Пример** + +Запрос: + +``` sql +CREATE TABLE t1 (x String) ENGINE = Memory COMMENT 'The temporary table'; +SELECT name, comment FROM system.tables WHERE name = 't1'; +``` + +Результат: + +```text +┌─name─┬─comment─────────────┐ +│ t1 │ The temporary table │ +└──────┴─────────────────────┘ +``` + diff --git a/docs/ru/sql-reference/statements/create/user.md b/docs/ru/sql-reference/statements/create/user.md index 68277d67052..ea64bff061b 100644 --- a/docs/ru/sql-reference/statements/create/user.md +++ b/docs/ru/sql-reference/statements/create/user.md @@ -9,54 +9,68 @@ toc_title: "Пользователь" Синтаксис: -```sql +``` sql CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1] [, name2 [ON CLUSTER cluster_name2] ...] - [IDENTIFIED [WITH {NO_PASSWORD|PLAINTEXT_PASSWORD|SHA256_PASSWORD|SHA256_HASH|DOUBLE_SHA1_PASSWORD|DOUBLE_SHA1_HASH}] BY {'password'|'hash'}] + [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']}] [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] [DEFAULT ROLE role [,...]] - [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] + [GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]] + [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY | WRITABLE] | PROFILE 'profile_name'] [,...] ``` +`ON CLUSTER` позволяет создавать пользователей в кластере, см. [Распределенные DDL](../../../sql-reference/distributed-ddl.md). 
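+
+Например, следующий запрос создаст пользователя сразу на всех хостах кластера (имена пользователя и кластера здесь условные, приведены только для иллюстрации):
+
+``` sql
+CREATE USER max ON CLUSTER mycluster IDENTIFIED WITH sha256_password BY 'qwerty';
+```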
+ ## Идентификация Существует несколько способов идентификации пользователя: -- `IDENTIFIED WITH no_password` -- `IDENTIFIED WITH plaintext_password BY 'qwerty'` -- `IDENTIFIED WITH sha256_password BY 'qwerty'` or `IDENTIFIED BY 'password'` -- `IDENTIFIED WITH sha256_hash BY 'hash'` -- `IDENTIFIED WITH double_sha1_password BY 'qwerty'` -- `IDENTIFIED WITH double_sha1_hash BY 'hash'` +- `IDENTIFIED WITH no_password` +- `IDENTIFIED WITH plaintext_password BY 'qwerty'` +- `IDENTIFIED WITH sha256_password BY 'qwerty'` or `IDENTIFIED BY 'password'` +- `IDENTIFIED WITH sha256_hash BY 'hash'` +- `IDENTIFIED WITH double_sha1_password BY 'qwerty'` +- `IDENTIFIED WITH double_sha1_hash BY 'hash'` +- `IDENTIFIED WITH ldap SERVER 'server_name'` +- `IDENTIFIED WITH kerberos` or `IDENTIFIED WITH kerberos REALM 'realm'` ## Пользовательский хост Пользовательский хост — это хост, с которого можно установить соединение с сервером ClickHouse. Хост задается в секции `HOST` следующими способами: -- `HOST IP 'ip_address_or_subnetwork'` — Пользователь может подключиться к серверу ClickHouse только с указанного IP-адреса или [подсети](https://ru.wikipedia.org/wiki/Подсеть). Примеры: `HOST IP '192.168.0.0/16'`, `HOST IP '2001:DB8::/32'`. При использовании в эксплуатации указывайте только элементы `HOST IP` (IP-адреса и маски подсети), так как использование `host` и `host_regexp` может привести к дополнительной задержке. -- `HOST ANY` — Пользователь может подключиться с любого хоста. Используется по умолчанию. -- `HOST LOCAL` — Пользователь может подключиться только локально. -- `HOST NAME 'fqdn'` — Хост задается через FQDN. Например, `HOST NAME 'mysite.com'`. -- `HOST NAME REGEXP 'regexp'` — Позволяет использовать регулярные выражения [pcre](http://www.pcre.org/), чтобы задать хосты. Например, `HOST NAME REGEXP '.*\.mysite\.com'`. -- `HOST LIKE 'template'` — Позволяет использовать оператор [LIKE](../../functions/string-search-functions.md#function-like) для фильтрации хостов. Например, `HOST LIKE '%'` эквивалентен `HOST ANY`; `HOST LIKE '%.mysite.com'` разрешает подключение со всех хостов в домене `mysite.com`. +- `HOST IP 'ip_address_or_subnetwork'` — Пользователь может подключиться к серверу ClickHouse только с указанного IP-адреса или [подсети](https://ru.wikipedia.org/wiki/Подсеть). Примеры: `HOST IP '192.168.0.0/16'`, `HOST IP '2001:DB8::/32'`. При использовании в эксплуатации указывайте только элементы `HOST IP` (IP-адреса и маски подсети), так как использование `host` и `host_regexp` может привести к дополнительной задержке. +- `HOST ANY` — Пользователь может подключиться с любого хоста. Используется по умолчанию. +- `HOST LOCAL` — Пользователь может подключиться только локально. +- `HOST NAME 'fqdn'` — Хост задается через FQDN. Например, `HOST NAME 'mysite.com'`. +- `HOST NAME REGEXP 'regexp'` — Позволяет использовать регулярные выражения [pcre](http://www.pcre.org/), чтобы задать хосты. Например, `HOST NAME REGEXP '.*\.mysite\.com'`. +- `HOST LIKE 'template'` — Позволяет использовать оператор [LIKE](../../functions/string-search-functions.md#function-like) для фильтрации хостов. Например, `HOST LIKE '%'` эквивалентен `HOST ANY`; `HOST LIKE '%.mysite.com'` разрешает подключение со всех хостов в домене `mysite.com`. Также, чтобы задать хост, вы можете использовать `@` вместе с именем пользователя. Примеры: -- `CREATE USER mira@'127.0.0.1'` — Эквивалентно `HOST IP`. -- `CREATE USER mira@'localhost'` — Эквивалентно `HOST LOCAL`. -- `CREATE USER mira@'192.168.%.%'` — Эквивалентно `HOST LIKE`. 
+- `CREATE USER mira@'127.0.0.1'` — Эквивалентно `HOST IP`. +- `CREATE USER mira@'localhost'` — Эквивалентно `HOST LOCAL`. +- `CREATE USER mira@'192.168.%.%'` — Эквивалентно `HOST LIKE`. !!! info "Внимание" ClickHouse трактует конструкцию `user_name@'address'` как имя пользователя целиком. То есть технически вы можете создать несколько пользователей с одинаковыми `user_name`, но разными частями конструкции после `@`, но лучше так не делать. + +## Секция GRANTEES {#grantees} +Указываются пользователи или роли, которым разрешено получать [привилегии](../../../sql-reference/statements/grant.md#grant-privileges) от создаваемого пользователя при условии, что этому пользователю также предоставлен весь необходимый доступ с использованием [GRANT OPTION](../../../sql-reference/statements/grant.md#grant-privigele-syntax). Параметры секции `GRANTEES`: + +- `user` — указывается пользователь, которому разрешено получать привилегии от создаваемого пользователя. +- `role` — указывается роль, которой разрешено получать привилегии от создаваемого пользователя. +- `ANY` — любому пользователю или любой роли разрешено получать привилегии от создаваемого пользователя. Используется по умолчанию. +- `NONE` — никому не разрешено получать привилегии от создаваемого пользователя. + +Вы можете исключить любого пользователя или роль, используя выражение `EXCEPT`. Например, `CREATE USER user1 GRANTEES ANY EXCEPT user2`. Это означает, что если `user1` имеет привилегии, предоставленные с использованием `GRANT OPTION`, он сможет предоставить их любому, кроме `user2`. ## Примеры {#create-user-examples} - Создать аккаунт `mira`, защищенный паролем `qwerty`: ```sql -CREATE USER mira HOST IP '127.0.0.1' IDENTIFIED WITH sha256_password BY 'qwerty' +CREATE USER mira HOST IP '127.0.0.1' IDENTIFIED WITH sha256_password BY 'qwerty'; ``` Пользователь `mira` должен запустить клиентское приложение на хосте, где запущен ClickHouse. @@ -64,13 +78,13 @@ CREATE USER mira HOST IP '127.0.0.1' IDENTIFIED WITH sha256_password BY 'qwerty' Создать аккаунт `john`, назначить на него роли, сделать данные роли ролями по умолчанию: ``` sql -CREATE USER john DEFAULT ROLE role1, role2 +CREATE USER john DEFAULT ROLE role1, role2; ``` Создать аккаунт `john` и установить ролями по умолчанию все его будущие роли: ``` sql -CREATE USER user DEFAULT ROLE ALL +CREATE USER john DEFAULT ROLE ALL; ``` Когда роль будет назначена аккаунту `john`, она автоматически станет ролью по умолчанию. @@ -78,7 +92,11 @@ CREATE USER user DEFAULT ROLE ALL Создать аккаунт `john` и установить ролями по умолчанию все его будущие роли, кроме `role1` и `role2`: ``` sql -CREATE USER john DEFAULT ROLE ALL EXCEPT role1, role2 +CREATE USER john DEFAULT ROLE ALL EXCEPT role1, role2; ``` - \ No newline at end of file +Создать пользователя с аккаунтом `john` и разрешить ему предоставить свои привилегии пользователю с аккаунтом `jack`: + +``` sql +CREATE USER john GRANTEES jack; +``` diff --git a/docs/ru/sql-reference/statements/detach.md b/docs/ru/sql-reference/statements/detach.md index d707acd7ccf..af915d38772 100644 --- a/docs/ru/sql-reference/statements/detach.md +++ b/docs/ru/sql-reference/statements/detach.md @@ -10,7 +10,7 @@ toc_title: DETACH Синтаксис: ``` sql -DETACH TABLE|VIEW [IF EXISTS] [db.]name [PERMANENTLY] [ON CLUSTER cluster] +DETACH TABLE|VIEW [IF EXISTS] [db.]name [ON CLUSTER cluster] [PERMANENTLY] ``` Но ни данные, ни метаданные таблицы или материализованного представления не удаляются. 
При следующем запуске сервера, если не было использовано `PERMANENTLY`, сервер прочитает метаданные и снова узнает о таблице/представлении. Если таблица или представление были отключены перманентно, сервер не подключит их обратно автоматически. diff --git a/docs/ru/sql-reference/statements/explain.md b/docs/ru/sql-reference/statements/explain.md new file mode 100644 index 00000000000..c925e7030a7 --- /dev/null +++ b/docs/ru/sql-reference/statements/explain.md @@ -0,0 +1,388 @@ +--- +toc_priority: 39 +toc_title: EXPLAIN +--- + +# EXPLAIN {#explain} + +Выводит план выполнения запроса. + +Синтаксис: + +```sql +EXPLAIN [AST | SYNTAX | PLAN | PIPELINE] [setting = value, ...] SELECT ... [FORMAT ...] +``` + +Пример: + +```sql +EXPLAIN SELECT sum(number) FROM numbers(10) UNION ALL SELECT sum(number) FROM numbers(10) ORDER BY sum(number) ASC FORMAT TSV; +``` + +```sql +Union + Expression (Projection) + Expression (Before ORDER BY and SELECT) + Aggregating + Expression (Before GROUP BY) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemNumbers) + Expression (Projection) + MergingSorted (Merge sorted streams for ORDER BY) + MergeSorting (Merge sorted blocks for ORDER BY) + PartialSorting (Sort each block for ORDER BY) + Expression (Before ORDER BY and SELECT) + Aggregating + Expression (Before GROUP BY) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemNumbers) +``` + +## Типы EXPLAIN {#explain-types} + +- `AST` — абстрактное синтаксическое дерево. +- `SYNTAX` — текст запроса после оптимизации на уровне AST. +- `PLAN` — план выполнения запроса. +- `PIPELINE` — конвейер выполнения запроса. + +### EXPLAIN AST {#explain-ast} + +Дамп AST запроса. Поддерживает все типы запросов, не только `SELECT`. + +Примеры: + +```sql +EXPLAIN AST SELECT 1; +``` + +```sql +SelectWithUnionQuery (children 1) + ExpressionList (children 1) + SelectQuery (children 1) + ExpressionList (children 1) + Literal UInt64_1 +``` + +```sql +EXPLAIN AST ALTER TABLE t1 DELETE WHERE date = today(); +``` + +```sql + explain + AlterQuery t1 (children 1) + ExpressionList (children 1) + AlterCommand 27 (children 1) + Function equals (children 1) + ExpressionList (children 2) + Identifier date + Function today (children 1) + ExpressionList +``` + +### EXPLAIN SYNTAX {#explain-syntax} + +Возвращает текст запроса после применения синтаксических оптимизаций. + +Пример: + +```sql +EXPLAIN SYNTAX SELECT * FROM system.numbers AS a, system.numbers AS b, system.numbers AS c; +``` + +```sql +SELECT + `--a.number` AS `a.number`, + `--b.number` AS `b.number`, + number AS `c.number` +FROM +( + SELECT + number AS `--a.number`, + b.number AS `--b.number` + FROM system.numbers AS a + CROSS JOIN system.numbers AS b +) AS `--.s` +CROSS JOIN system.numbers AS c +``` + +### EXPLAIN PLAN {#explain-plan} + +Дамп шагов выполнения запроса. + +Настройки: + +- `header` — выводит выходной заголовок для шага. По умолчанию: 0. +- `description` — выводит описание шага. По умолчанию: 1. +- `indexes` — показывает используемые индексы, количество отфильтрованных кусков и гранул для каждого примененного индекса. По умолчанию: 0. Поддерживается для таблиц семейства [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md). +- `actions` — выводит подробную информацию о действиях, выполняемых на данном шаге. По умолчанию: 0. +- `json` — выводит шаги выполнения запроса в виде строки в формате [JSON](../../interfaces/formats.md#json). По умолчанию: 0. 
Чтобы избежать ненужного экранирования, рекомендуется использовать формат [TSVRaw](../../interfaces/formats.md#tabseparatedraw). + +Пример: + +```sql +EXPLAIN SELECT sum(number) FROM numbers(10) GROUP BY number % 4; +``` + +```sql +Union + Expression (Projection) + Expression (Before ORDER BY and SELECT) + Aggregating + Expression (Before GROUP BY) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemNumbers) +``` + +!!! note "Примечание" + Оценка стоимости выполнения шага и запроса не поддерживается. + +При `json = 1` шаги выполнения запроса выводятся в формате JSON. Каждый узел — это словарь, в котором всегда есть ключи `Node Type` и `Plans`. `Node Type` — это строка с именем шага. `Plans` — это массив с описаниями дочерних шагов. Другие дополнительные ключи могут быть добавлены в зависимости от типа узла и настроек. + +Пример: + +```sql +EXPLAIN json = 1, description = 0 SELECT 1 UNION ALL SELECT 2 FORMAT TSVRaw; +``` + +```json +[ + { + "Plan": { + "Node Type": "Union", + "Plans": [ + { + "Node Type": "Expression", + "Plans": [ + { + "Node Type": "SettingQuotaAndLimits", + "Plans": [ + { + "Node Type": "ReadFromStorage" + } + ] + } + ] + }, + { + "Node Type": "Expression", + "Plans": [ + { + "Node Type": "SettingQuotaAndLimits", + "Plans": [ + { + "Node Type": "ReadFromStorage" + } + ] + } + ] + } + ] + } + } +] +``` + +При `description` = 1 к шагу добавляется ключ `Description`: + +```json +{ + "Node Type": "ReadFromStorage", + "Description": "SystemOne" +} +``` + +При `header` = 1 к шагу добавляется ключ `Header` в виде массива столбцов. + +Пример: + +```sql +EXPLAIN json = 1, description = 0, header = 1 SELECT 1, 2 + dummy; +``` + +```json +[ + { + "Plan": { + "Node Type": "Expression", + "Header": [ + { + "Name": "1", + "Type": "UInt8" + }, + { + "Name": "plus(2, dummy)", + "Type": "UInt16" + } + ], + "Plans": [ + { + "Node Type": "SettingQuotaAndLimits", + "Header": [ + { + "Name": "dummy", + "Type": "UInt8" + } + ], + "Plans": [ + { + "Node Type": "ReadFromStorage", + "Header": [ + { + "Name": "dummy", + "Type": "UInt8" + } + ] + } + ] + } + ] + } + } +] +``` + +При `indexes` = 1 добавляется ключ `Indexes`. Он содержит массив используемых индексов. Каждый индекс описывается как строка в формате JSON с ключом `Type` (`MinMax`, `Partition`, `PrimaryKey` или `Skip`) и дополнительные ключи: + +- `Name` — имя индекса (на данный момент используется только для индекса `Skip`). +- `Keys` — массив столбцов, используемых индексом. +- `Condition` — строка с используемым условием. +- `Description` — индекс (на данный момент используется только для индекса `Skip`). +- `Initial Parts` — количество кусков до применения индекса. +- `Selected Parts` — количество кусков после применения индекса. +- `Initial Granules` — количество гранул до применения индекса. +- `Selected Granulesis` — количество гранул после применения индекса. 
+ +Пример: + +```json +"Node Type": "ReadFromMergeTree", +"Indexes": [ + { + "Type": "MinMax", + "Keys": ["y"], + "Condition": "(y in [1, +inf))", + "Initial Parts": 5, + "Selected Parts": 4, + "Initial Granules": 12, + "Selected Granules": 11 + }, + { + "Type": "Partition", + "Keys": ["y", "bitAnd(z, 3)"], + "Condition": "and((bitAnd(z, 3) not in [1, 1]), and((y in [1, +inf)), (bitAnd(z, 3) not in [1, 1])))", + "Initial Parts": 4, + "Selected Parts": 3, + "Initial Granules": 11, + "Selected Granules": 10 + }, + { + "Type": "PrimaryKey", + "Keys": ["x", "y"], + "Condition": "and((x in [11, +inf)), (y in [1, +inf)))", + "Initial Parts": 3, + "Selected Parts": 2, + "Initial Granules": 10, + "Selected Granules": 6 + }, + { + "Type": "Skip", + "Name": "t_minmax", + "Description": "minmax GRANULARITY 2", + "Initial Parts": 2, + "Selected Parts": 1, + "Initial Granules": 6, + "Selected Granules": 2 + }, + { + "Type": "Skip", + "Name": "t_set", + "Description": "set GRANULARITY 2", + "Initial Parts": 1, + "Selected Parts": 1, + "Initial Granules": 2, + "Selected Granules": 1 + } +] +``` + +При `actions` = 1 добавляются ключи, зависящие от типа шага. + +Пример: + +```sql +EXPLAIN json = 1, actions = 1, description = 0 SELECT 1 FORMAT TSVRaw; +``` + +```json +[ + { + "Plan": { + "Node Type": "Expression", + "Expression": { + "Inputs": [], + "Actions": [ + { + "Node Type": "Column", + "Result Type": "UInt8", + "Result Type": "Column", + "Column": "Const(UInt8)", + "Arguments": [], + "Removed Arguments": [], + "Result": 0 + } + ], + "Outputs": [ + { + "Name": "1", + "Type": "UInt8" + } + ], + "Positions": [0], + "Project Input": true + }, + "Plans": [ + { + "Node Type": "SettingQuotaAndLimits", + "Plans": [ + { + "Node Type": "ReadFromStorage" + } + ] + } + ] + } + } +] +``` + +### EXPLAIN PIPELINE {#explain-pipeline} + +Настройки: + +- `header` — выводит заголовок для каждого выходного порта. По умолчанию: 0. +- `graph` — выводит граф, описанный на языке [DOT](https://ru.wikipedia.org/wiki/DOT_(язык)). По умолчанию: 0. +- `compact` — выводит граф в компактном режиме, если включена настройка `graph`. По умолчанию: 1. 
+ +Пример: + +```sql +EXPLAIN PIPELINE SELECT sum(number) FROM numbers_mt(100000) GROUP BY number % 4; +``` + +```sql +(Union) +(Expression) +ExpressionTransform + (Expression) + ExpressionTransform + (Aggregating) + Resize 2 → 1 + AggregatingTransform × 2 + (Expression) + ExpressionTransform × 2 + (SettingQuotaAndLimits) + (ReadFromStorage) + NumbersMt × 2 0 → 1 +``` + +[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/explain/) diff --git a/docs/ru/sql-reference/statements/grant.md b/docs/ru/sql-reference/statements/grant.md index 7b2d26902ef..093e6eb3b93 100644 --- a/docs/ru/sql-reference/statements/grant.md +++ b/docs/ru/sql-reference/statements/grant.md @@ -93,7 +93,7 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION - `ALTER ADD CONSTRAINT` - `ALTER DROP CONSTRAINT` - `ALTER TTL` - - `ALTER MATERIALIZE TTL` + - `ALTER MATERIALIZE TTL` - `ALTER SETTINGS` - `ALTER MOVE PARTITION` - `ALTER FETCH PARTITION` @@ -104,9 +104,9 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION - [CREATE](#grant-create) - `CREATE DATABASE` - `CREATE TABLE` + - `CREATE TEMPORARY TABLE` - `CREATE VIEW` - `CREATE DICTIONARY` - - `CREATE TEMPORARY TABLE` - [DROP](#grant-drop) - `DROP DATABASE` - `DROP TABLE` @@ -152,7 +152,7 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION - `SYSTEM RELOAD` - `SYSTEM RELOAD CONFIG` - `SYSTEM RELOAD DICTIONARY` - - `SYSTEM RELOAD EMBEDDED DICTIONARIES` + - `SYSTEM RELOAD EMBEDDED DICTIONARIES` - `SYSTEM MERGES` - `SYSTEM TTL MERGES` - `SYSTEM FETCHES` @@ -279,7 +279,7 @@ GRANT INSERT(x,y) ON db.table TO john - `ALTER ADD CONSTRAINT`. Уровень: `TABLE`. Алиасы: `ADD CONSTRAINT` - `ALTER DROP CONSTRAINT`. Уровень: `TABLE`. Алиасы: `DROP CONSTRAINT` - `ALTER TTL`. Уровень: `TABLE`. Алиасы: `ALTER MODIFY TTL`, `MODIFY TTL` - - `ALTER MATERIALIZE TTL`. Уровень: `TABLE`. Алиасы: `MATERIALIZE TTL` + - `ALTER MATERIALIZE TTL`. Уровень: `TABLE`. Алиасы: `MATERIALIZE TTL` - `ALTER SETTINGS`. Уровень: `TABLE`. Алиасы: `ALTER SETTING`, `ALTER MODIFY SETTING`, `MODIFY SETTING` - `ALTER MOVE PARTITION`. Уровень: `TABLE`. Алиасы: `ALTER MOVE PART`, `MOVE PARTITION`, `MOVE PART` - `ALTER FETCH PARTITION`. Уровень: `TABLE`. Алиасы: `FETCH PARTITION` @@ -307,9 +307,9 @@ GRANT INSERT(x,y) ON db.table TO john - `CREATE`. Уровень: `GROUP` - `CREATE DATABASE`. Уровень: `DATABASE` - `CREATE TABLE`. Уровень: `TABLE` + - `CREATE TEMPORARY TABLE`. Уровень: `GLOBAL` - `CREATE VIEW`. Уровень: `VIEW` - `CREATE DICTIONARY`. Уровень: `DICTIONARY` - - `CREATE TEMPORARY TABLE`. Уровень: `GLOBAL` **Дополнительно** @@ -407,7 +407,7 @@ GRANT INSERT(x,y) ON db.table TO john - `SYSTEM RELOAD`. Уровень: `GROUP` - `SYSTEM RELOAD CONFIG`. Уровень: `GLOBAL`. Алиасы: `RELOAD CONFIG` - `SYSTEM RELOAD DICTIONARY`. Уровень: `GLOBAL`. Алиасы: `SYSTEM RELOAD DICTIONARIES`, `RELOAD DICTIONARY`, `RELOAD DICTIONARIES` - - `SYSTEM RELOAD EMBEDDED DICTIONARIES`. Уровень: `GLOBAL`. Алиасы: `RELOAD EMBEDDED DICTIONARIES` + - `SYSTEM RELOAD EMBEDDED DICTIONARIES`. Уровень: `GLOBAL`. Алиасы: `RELOAD EMBEDDED DICTIONARIES` - `SYSTEM MERGES`. Уровень: `TABLE`. Алиасы: `SYSTEM STOP MERGES`, `SYSTEM START MERGES`, `STOP MERGES`, `START MERGES` - `SYSTEM TTL MERGES`. Уровень: `TABLE`. Алиасы: `SYSTEM STOP TTL MERGES`, `SYSTEM START TTL MERGES`, `STOP TTL MERGES`, `START TTL MERGES` - `SYSTEM FETCHES`. Уровень: `TABLE`. 
Алиасы: `SYSTEM STOP FETCHES`, `SYSTEM START FETCHES`, `STOP FETCHES`, `START FETCHES` diff --git a/docs/ru/sql-reference/statements/insert-into.md b/docs/ru/sql-reference/statements/insert-into.md index bbd330962cf..328f1023624 100644 --- a/docs/ru/sql-reference/statements/insert-into.md +++ b/docs/ru/sql-reference/statements/insert-into.md @@ -107,6 +107,8 @@ INSERT INTO [db.]table [(c1, c2, c3)] SELECT ... Для табличной функции [input()](../table-functions/input.md) после секции `SELECT` должна следовать секция `FORMAT`. +Чтобы вставить значение по умолчанию вместо `NULL` в столбец, который не позволяет хранить `NULL`, включите настройку [insert_null_as_default](../../operations/settings/settings.md#insert_null_as_default). + ### Замечания о производительности {#zamechaniia-o-proizvoditelnosti} `INSERT` сортирует входящие данные по первичному ключу и разбивает их на партиции по ключу партиционирования. Если вы вставляете данные в несколько партиций одновременно, то это может значительно снизить производительность запроса `INSERT`. Чтобы избежать этого: diff --git a/docs/ru/sql-reference/statements/optimize.md b/docs/ru/sql-reference/statements/optimize.md index 44101910a6c..e1a9d613537 100644 --- a/docs/ru/sql-reference/statements/optimize.md +++ b/docs/ru/sql-reference/statements/optimize.md @@ -5,19 +5,83 @@ toc_title: OPTIMIZE # OPTIMIZE {#misc_operations-optimize} -``` sql -OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE] -``` - -Запрос пытается запустить внеплановый мёрж кусков данных для таблиц семейства [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md). Другие движки таблиц не поддерживаются. - -Если `OPTIMIZE` применяется к таблицам семейства [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md), ClickHouse создаёт задачу на мёрж и ожидает её исполнения на всех узлах (если активирована настройка `replication_alter_partitions_sync`). - -- Если `OPTIMIZE` не выполняет мёрж по любой причине, ClickHouse не оповещает об этом клиента. Чтобы включить оповещения, используйте настройку [optimize_throw_if_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop). -- Если указать `PARTITION`, то оптимизация выполняется только для указанной партиции. [Как задавать имя партиции в запросах](alter/index.md#alter-how-to-specify-part-expr). -- Если указать `FINAL`, то оптимизация выполняется даже в том случае, если все данные уже лежат в одном куске. Кроме того, слияние является принудительным, даже если выполняются параллельные слияния. -- Если указать `DEDUPLICATE`, то произойдет схлопывание полностью одинаковых строк (сравниваются значения во всех колонках), имеет смысл только для движка MergeTree. +Запрос пытается запустить внеплановое слияние кусков данных для таблиц. !!! warning "Внимание" - Запрос `OPTIMIZE` не может устранить причину появления ошибки «Too many parts». - + `OPTIMIZE` не устраняет причину появления ошибки `Too many parts`. + +**Синтаксис** + +``` sql +OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE [BY expression]] +``` + +Может применяться к таблицам семейства [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md), [MaterializedView](../../engines/table-engines/special/materializedview.md) и [Buffer](../../engines/table-engines/special/buffer.md). Другие движки таблиц не поддерживаются. 
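+
+Минимальный пример использования (условный набросок; предполагается, что таблица `t` существует и использует движок семейства MergeTree):
+
+``` sql
+-- внеплановое слияние кусков условной таблицы t
+OPTIMIZE TABLE t FINAL;
+```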
+ +Если запрос `OPTIMIZE` применяется к таблицам семейства [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md), ClickHouse создаёт задачу на слияние и ожидает её исполнения на всех узлах (если активирована настройка `replication_alter_partitions_sync`). + +- По умолчанию, если запросу `OPTIMIZE` не удалось выполнить слияние, то +ClickHouse не оповещает клиента. Чтобы включить оповещения, используйте настройку [optimize_throw_if_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop). +- Если указать `PARTITION`, то оптимизация выполняется только для указанной партиции. [Как задавать имя партиции в запросах](alter/index.md#alter-how-to-specify-part-expr). +- Если указать `FINAL`, то оптимизация выполняется даже в том случае, если все данные уже лежат в одном куске данных. Кроме того, слияние является принудительным, даже если выполняются параллельные слияния. +- Если указать `DEDUPLICATE`, то произойдет схлопывание полностью одинаковых строк (сравниваются значения во всех столбцах), имеет смысл только для движка MergeTree. + +## Выражение BY {#by-expression} + +Чтобы выполнить дедупликацию по произвольному набору столбцов, вы можете явно указать список столбцов или использовать любую комбинацию подстановки [`*`](../../sql-reference/statements/select/index.md#asterisk), выражений [`COLUMNS`](../../sql-reference/statements/select/index.md#columns-expression) и [`EXCEPT`](../../sql-reference/statements/select/index.md#except-modifier). + + Список столбцов для дедупликации должен включать все столбцы, указанные в условиях сортировки (первичный ключ и ключ сортировки), а также в условиях партиционирования (ключ партиционирования). + + !!! note "Примечание" + Обратите внимание, что символ подстановки `*` обрабатывается так же, как и в запросах `SELECT`: столбцы `MATERIALIZED` и `ALIAS` не включаются в результат. + Если указать пустой список или выражение, которое возвращает пустой список, или дедуплицировать столбец по псевдониму (`ALIAS`), то сервер вернет ошибку. + + +**Примеры** + +Рассмотрим таблицу: + +``` sql +CREATE TABLE example ( + primary_key Int32, + secondary_key Int32, + value UInt32, + partition_key UInt32, + materialized_value UInt32 MATERIALIZED 12345, + aliased_value UInt32 ALIAS 2, + PRIMARY KEY primary_key +) ENGINE=MergeTree +PARTITION BY partition_key; +``` + +Прежний способ дедупликации, когда учитываются все столбцы. Строка удаляется только в том случае, если все значения во всех столбцах равны соответствующим значениям в предыдущей строке. + +``` sql +OPTIMIZE TABLE example FINAL DEDUPLICATE; +``` + +Дедупликация по всем столбцам, кроме `ALIAS` и `MATERIALIZED`: `primary_key`, `secondary_key`, `value`, `partition_key` и `materialized_value`. + + +``` sql +OPTIMIZE TABLE example FINAL DEDUPLICATE BY *; +``` + +Дедупликация по всем столбцам, кроме `ALIAS`, `MATERIALIZED` и `materialized_value`: столбцы `primary_key`, `secondary_key`, `value` и `partition_key`. + + +``` sql +OPTIMIZE TABLE example FINAL DEDUPLICATE BY * EXCEPT materialized_value; +``` + +Дедупликация по столбцам `primary_key`, `secondary_key` и `partition_key`. + +``` sql +OPTIMIZE TABLE example FINAL DEDUPLICATE BY primary_key, secondary_key, partition_key; +``` + +Дедупликация по любому столбцу, соответствующему регулярному выражению: столбцам `primary_key`, `secondary_key` и `partition_key`. 
+
+
``` sql
OPTIMIZE TABLE example FINAL DEDUPLICATE BY COLUMNS('.*_key');
```
diff --git a/docs/ru/sql-reference/statements/rename.md b/docs/ru/sql-reference/statements/rename.md index 104918c1a73..192426dbafa 100644 --- a/docs/ru/sql-reference/statements/rename.md +++ b/docs/ru/sql-reference/statements/rename.md @@ -3,8 +3,16 @@ toc_priority: 48 toc_title: RENAME ---

-# RENAME {#misc_operations-rename}
+# RENAME Statement {#misc_operations-rename}
+## RENAME DATABASE {#misc_operations-rename_database}
+Переименование базы данных.
+
+``` sql
+RENAME DATABASE atomic_database1 TO atomic_database2 [ON CLUSTER cluster]
+```
+
+## RENAME TABLE {#misc_operations-rename_table}
Переименовывает одну или несколько таблиц.

``` sql
@@ -12,5 +20,3 @@ RENAME TABLE [db11.]name11 TO [db12.]name12, [db21.]name21 TO [db22.]name22, ...
```

Переименовывание таблицы является лёгкой операцией. Если вы указали после `TO` другую базу данных, то таблица будет перенесена в эту базу данных. При этом, директории с базами данных должны быть расположены в одной файловой системе (иначе возвращается ошибка). В случае переименования нескольких таблиц в одном запросе — это неатомарная операция, может выполнится частично, запросы в других сессиях могут получить ошибку `Table ... doesn't exist...`.
-
-
diff --git a/docs/ru/sql-reference/statements/select/index.md b/docs/ru/sql-reference/statements/select/index.md index 886952ea5cf..a3b4e889397 100644 --- a/docs/ru/sql-reference/statements/select/index.md +++ b/docs/ru/sql-reference/statements/select/index.md @@ -45,6 +45,7 @@ SELECT [DISTINCT] expr_list
- [Секция SELECT](#select-clause)
- [Секция DISTINCT](distinct.md)
- [Секция LIMIT](limit.md)
+- [Секция OFFSET](offset.md)
- [Секция UNION ALL](union.md)
- [Секция INTO OUTFILE](into-outfile.md)
- [Секция FORMAT](format.md)
diff --git a/docs/ru/sql-reference/statements/select/limit.md b/docs/ru/sql-reference/statements/select/limit.md index 03b720226f0..e4012e89556 100644 --- a/docs/ru/sql-reference/statements/select/limit.md +++ b/docs/ru/sql-reference/statements/select/limit.md @@ -12,13 +12,16 @@ toc_title: LIMIT

При отсутствии секции [ORDER BY](order-by.md), однозначно сортирующей результат, результат может быть произвольным и может являться недетерминированным.

+!!! note "Примечание"
+    Количество возвращаемых строк может зависеть также от настройки [limit](../../../operations/settings/settings.md#limit).
+
## Модификатор LIMIT ... WITH TIES {#limit-with-ties}

Когда вы установите модификатор WITH TIES для `LIMIT n[,m]` и указываете `ORDER BY expr_list`, вы получите первые `n` или `n,m` строк и дополнительно все строки с теми же самым значениями полей указанных в `ORDER BY` равными строке на позиции `n` для `LIMIT n` или `m` для `LIMIT n,m`.

Этот модификатор также может быть скомбинирован с [ORDER BY ... WITH FILL модификатором](../../../sql-reference/statements/select/order-by.md#orderby-with-fill)

-Для примера следующий запрос
+Для примера следующий запрос:
```sql
SELECT * FROM ( SELECT number%50 AS n FROM numbers(100)
diff --git a/docs/ru/sql-reference/statements/select/offset.md b/docs/ru/sql-reference/statements/select/offset.md new file mode 100644 index 00000000000..31ff1d6ea8b --- /dev/null +++ b/docs/ru/sql-reference/statements/select/offset.md @@ -0,0 +1,86 @@
+---
+toc_title: OFFSET
+---
+
+# Секция OFFSET FETCH {#offset-fetch}
+
+`OFFSET` и `FETCH` позволяют извлекать данные по частям. Они указывают строки, которые вы хотите получить в результате запроса.
+

``` sql
OFFSET offset_row_count {ROW | ROWS} [FETCH {FIRST | NEXT} fetch_row_count {ROW | ROWS} {ONLY | WITH TIES}]
```

`offset_row_count` или `fetch_row_count` может быть числом или литеральной константой. Если вы не задаете `fetch_row_count` явно, используется значение по умолчанию, равное 1.

`OFFSET` указывает количество строк, которые необходимо пропустить перед началом возврата строк из запроса.

`FETCH` указывает максимальное количество строк, которые могут быть получены в результате запроса.

Опция `ONLY` используется для возврата строк, которые следуют сразу же за строками, пропущенными секцией `OFFSET`. В этом случае `FETCH` — это альтернатива [LIMIT](../../../sql-reference/statements/select/limit.md). Например, следующий запрос:

``` sql
SELECT * FROM test_fetch ORDER BY a OFFSET 1 ROW FETCH FIRST 3 ROWS ONLY;
```

идентичен запросу:

``` sql
SELECT * FROM test_fetch ORDER BY a LIMIT 3 OFFSET 1;
```

Опция `WITH TIES` используется для возврата дополнительных строк, которые привязываются к последней в результате запроса. Например, если `fetch_row_count` имеет значение 5 и существуют еще 2 строки с такими же значениями столбцов, указанных в `ORDER BY`, что и у пятой строки результата, то финальный набор будет содержать 7 строк.

!!! note "Примечание"
    Секция `OFFSET` должна находиться перед секцией `FETCH`, если обе присутствуют.

!!! note "Примечание"
    Общее количество пропущенных строк может зависеть также от настройки [offset](../../../operations/settings/settings.md#offset).

## Примеры {#examples}

Входная таблица:

``` text
┌─a─┬─b─┐
│ 1 │ 1 │
│ 2 │ 1 │
│ 3 │ 4 │
│ 1 │ 3 │
│ 5 │ 4 │
│ 0 │ 6 │
│ 5 │ 7 │
└───┴───┘
```

Использование опции `ONLY`:

``` sql
SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROW FETCH FIRST 3 ROWS ONLY;
```

Результат:

``` text
┌─a─┬─b─┐
│ 2 │ 1 │
│ 3 │ 4 │
│ 5 │ 4 │
└───┴───┘
```

Использование опции `WITH TIES`:

``` sql
SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROW FETCH FIRST 3 ROWS WITH TIES;
```

Результат:

``` text
┌─a─┬─b─┐
│ 2 │ 1 │
│ 3 │ 4 │
│ 5 │ 4 │
│ 5 │ 7 │
└───┴───┘
```
diff --git a/docs/ru/sql-reference/statements/select/order-by.md b/docs/ru/sql-reference/statements/select/order-by.md index 9ddec923701..cb49d167b13 100644 --- a/docs/ru/sql-reference/statements/select/order-by.md +++ b/docs/ru/sql-reference/statements/select/order-by.md @@ -392,84 +392,3 @@ ORDER BY
 │ 1970-03-12 │ 1970-01-08 │ original │
 └────────────┴────────────┴──────────┘
 ```
-
-## Секция OFFSET FETCH {#offset-fetch}
-
-`OFFSET` и `FETCH` позволяют извлекать данные по частям. Они указывают строки, которые вы хотите получить в результате запроса.
-
-``` sql
-OFFSET offset_row_count {ROW | ROWS}] [FETCH {FIRST | NEXT} fetch_row_count {ROW | ROWS} {ONLY | WITH TIES}]
-```
-
-`offset_row_count` или `fetch_row_count` может быть числом или литеральной константой. Если вы не используете `fetch_row_count`, то его значение равно 1.
-
-`OFFSET` указывает количество строк, которые необходимо пропустить перед началом возврата строк из запроса.
-
-`FETCH` указывает максимальное количество строк, которые могут быть получены в результате запроса.
-
-Опция `ONLY` используется для возврата строк, которые следуют сразу же за строками, пропущенными секцией `OFFSET`. В этом случае `FETCH` — это альтернатива [LIMIT](../../../sql-reference/statements/select/limit.md).
Например, следующий запрос
-
-``` sql
-SELECT * FROM test_fetch ORDER BY a OFFSET 1 ROW FETCH FIRST 3 ROWS ONLY;
-```
-
-идентичен запросу
-
-``` sql
-SELECT * FROM test_fetch ORDER BY a LIMIT 3 OFFSET 1;
-```
-
-Опция `WITH TIES` используется для возврата дополнительных строк, которые привязываются к последней в результате запроса. Например, если `fetch_row_count` имеет значение 5 и существуют еще 2 строки с такими же значениями столбцов, указанных в `ORDER BY`, что и у пятой строки результата, то финальный набор будет содержать 7 строк.
-
-!!! note "Примечание"
-    Секция `OFFSET` должна находиться перед секцией `FETCH`, если обе присутствуют.
-
-### Примеры {#examples}
-
-Входная таблица:
-
-``` text
-┌─a─┬─b─┐
-│ 1 │ 1 │
-│ 2 │ 1 │
-│ 3 │ 4 │
-│ 1 │ 3 │
-│ 5 │ 4 │
-│ 0 │ 6 │
-│ 5 │ 7 │
-└───┴───┘
-```
-
-Использование опции `ONLY`:
-
-``` sql
-SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROW FETCH FIRST 3 ROWS ONLY;
-```
-
-Результат:
-
-``` text
-┌─a─┬─b─┐
-│ 2 │ 1 │
-│ 3 │ 4 │
-│ 5 │ 4 │
-└───┴───┘
-```
-
-Использование опции `WITH TIES`:
-
-``` sql
-SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROW FETCH FIRST 3 ROWS WITH TIES;
-```
-
-Результат:
-
-``` text
-┌─a─┬─b─┐
-│ 2 │ 1 │
-│ 3 │ 4 │
-│ 5 │ 4 │
-│ 5 │ 7 │
-└───┴───┘
-```
-
diff --git a/docs/ru/sql-reference/statements/select/prewhere.md b/docs/ru/sql-reference/statements/select/prewhere.md index c2a02b1a436..5ba25e6fa6e 100644 --- a/docs/ru/sql-reference/statements/select/prewhere.md +++ b/docs/ru/sql-reference/statements/select/prewhere.md @@ -16,6 +16,9 @@ Prewhere — это оптимизация для более эффективн
Если значение параметра `optimize_move_to_prewhere` равно 0, эвристика по автоматическому перемещнию части выражений из `WHERE` к `PREWHERE` отключается.

+!!! note "Внимание"
+    Секция `PREWHERE` выполняется до `FINAL`, поэтому результаты запросов `FROM FINAL` могут исказиться при использовании `PREWHERE` с полями, не входящими в `ORDER BY` таблицы.
+
## Ограничения {#limitations}

`PREWHERE` поддерживается только табличными движками из семейства `*MergeTree`.
diff --git a/docs/ru/sql-reference/statements/select/union.md b/docs/ru/sql-reference/statements/select/union.md index de8a9b0e4ea..a1e31a0be7f 100644 --- a/docs/ru/sql-reference/statements/select/union.md +++ b/docs/ru/sql-reference/statements/select/union.md @@ -78,3 +78,7 @@ SELECT 1 UNION SELECT 2 UNION SELECT 3 UNION SELECT 2;

Запросы, которые являются частью `UNION/UNION ALL/UNION DISTINCT`, выполняются параллельно, и их результаты могут быть смешаны вместе.

+**Смотрите также**
+
+- Настройка [insert_null_as_default](../../../operations/settings/settings.md#insert_null_as_default).
+- Настройка [union_default_mode](../../../operations/settings/settings.md#union-default-mode).
diff --git a/docs/ru/sql-reference/statements/system.md b/docs/ru/sql-reference/statements/system.md index ab68033d4f3..8c82cacdc43 100644 --- a/docs/ru/sql-reference/statements/system.md +++ b/docs/ru/sql-reference/statements/system.md @@ -196,7 +196,7 @@ SYSTEM STOP MOVES [[db.]merge_tree_family_table_name]

Возвращает `Ok.` даже если указана несуществующая таблица или таблица имеет тип отличный от MergeTree.
Возвращает ошибку если указана не существующая база данных: ``` sql -SYSTEM STOP MOVES [[db.]merge_tree_family_table_name] +SYSTEM START MOVES [[db.]merge_tree_family_table_name] ``` ## Managing ReplicatedMergeTree Tables {#query-language-system-replicated} @@ -204,6 +204,7 @@ SYSTEM STOP MOVES [[db.]merge_tree_family_table_name] ClickHouse может управлять фоновыми процессами связанными c репликацией в таблицах семейства [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replacingmergetree.md). ### STOP FETCHES {#query_language-system-stop-fetches} + Позволяет остановить фоновые процессы синхронизации новыми вставленными кусками данных с другими репликами в кластере для таблиц семейства `ReplicatedMergeTree`: Всегда возвращает `Ok.` вне зависимости от типа таблицы и даже если таблица или база данных не существет. @@ -212,6 +213,7 @@ SYSTEM STOP FETCHES [[db.]replicated_merge_tree_family_table_name] ``` ### START FETCHES {#query_language-system-start-fetches} + Позволяет запустить фоновые процессы синхронизации новыми вставленными кусками данных с другими репликами в кластере для таблиц семейства `ReplicatedMergeTree`: Всегда возвращает `Ok.` вне зависимости от типа таблицы и даже если таблица или база данных не существет. @@ -220,6 +222,7 @@ SYSTEM START FETCHES [[db.]replicated_merge_tree_family_table_name] ``` ### STOP REPLICATED SENDS {#query_language-system-start-replicated-sends} + Позволяет остановить фоновые процессы отсылки новых вставленных кусков данных другим репликам в кластере для таблиц семейства `ReplicatedMergeTree`: ``` sql @@ -227,6 +230,7 @@ SYSTEM STOP REPLICATED SENDS [[db.]replicated_merge_tree_family_table_name] ``` ### START REPLICATED SENDS {#query_language-system-start-replicated-sends} + Позволяет запустить фоновые процессы отсылки новых вставленных кусков данных другим репликам в кластере для таблиц семейства `ReplicatedMergeTree`: ``` sql @@ -234,6 +238,7 @@ SYSTEM START REPLICATED SENDS [[db.]replicated_merge_tree_family_table_name] ``` ### STOP REPLICATION QUEUES {#query_language-system-stop-replication-queues} + Останавливает фоновые процессы разбора заданий из очереди репликации которая хранится в Zookeeper для таблиц семейства `ReplicatedMergeTree`. Возможные типы заданий - merges, fetches, mutation, DDL запросы с ON CLUSTER: ``` sql @@ -241,6 +246,7 @@ SYSTEM STOP REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name] ``` ### START REPLICATION QUEUES {#query_language-system-start-replication-queues} + Запускает фоновые процессы разбора заданий из очереди репликации которая хранится в Zookeeper для таблиц семейства `ReplicatedMergeTree`. Возможные типы заданий - merges, fetches, mutation, DDL запросы с ON CLUSTER: ``` sql @@ -248,20 +254,24 @@ SYSTEM START REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name] ``` ### SYNC REPLICA {#query_language-system-sync-replica} + Ждет когда таблица семейства `ReplicatedMergeTree` будет синхронизирована с другими репликами в кластере, будет работать до достижения `receive_timeout`, если синхронизация для таблицы отключена в настоящий момент времени: ``` sql SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name ``` +После выполнения этого запроса таблица `[db.]replicated_merge_tree_family_table_name` синхронизирует команды из общего реплицированного лога в свою собственную очередь репликации. Затем запрос ждет, пока реплика не обработает все синхронизированные команды. 
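+
+Например (условный набросок; имя таблицы гипотетическое):
+
+``` sql
+SYSTEM SYNC REPLICA db.replicated_table;
+```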
+ ### RESTART REPLICA {#query_language-system-restart-replica} -Реинициализация состояния Zookeeper сессий для таблицы семейства `ReplicatedMergeTree`, сравнивает текущее состояние с тем что хранится в Zookeeper как источник правды и добавляет задачи Zookeeper очередь если необходимо -Инициализация очереди репликации на основе данных ZooKeeper, происходит так же как при attach table. На короткое время таблица станет недоступной для любых операций. + +Реинициализация состояния Zookeeper-сессий для таблицы семейства `ReplicatedMergeTree`. Сравнивает текущее состояние с тем, что хранится в Zookeeper, как источник правды, и добавляет задачи в очередь репликации в Zookeeper, если необходимо. +Инициализация очереди репликации на основе данных ZooKeeper происходит так же, как при attach table. На короткое время таблица станет недоступной для любых операций. ``` sql SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name ``` ### RESTART REPLICAS {#query_language-system-restart-replicas} -Реинициализация состояния Zookeeper сессий для всех `ReplicatedMergeTree` таблиц, сравнивает текущее состояние с тем что хранится в Zookeeper как источник правды и добавляет задачи Zookeeper очередь если необходимо +Реинициализация состояния ZooKeeper-сессий для всех `ReplicatedMergeTree` таблиц. Сравнивает текущее состояние реплики с тем, что хранится в ZooKeeper, как c источником правды, и добавляет задачи в очередь репликации в ZooKeeper, если необходимо. diff --git a/docs/ru/sql-reference/statements/truncate.md b/docs/ru/sql-reference/statements/truncate.md index b23d96d5b08..63f7fa86ea5 100644 --- a/docs/ru/sql-reference/statements/truncate.md +++ b/docs/ru/sql-reference/statements/truncate.md @@ -11,6 +11,6 @@ TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] Удаляет все данные из таблицы. Если условие `IF EXISTS` не указано, запрос вернет ошибку, если таблицы не существует. -Запрос `TRUNCATE` не поддерживается для следующих движков: [View](../../engines/table-engines/special/view.md), [File](../../engines/table-engines/special/file.md), [URL](../../engines/table-engines/special/url.md) и [Null](../../engines/table-engines/special/null.md). +Запрос `TRUNCATE` не поддерживается для следующих движков: [View](../../engines/table-engines/special/view.md), [File](../../engines/table-engines/special/file.md), [URL](../../engines/table-engines/special/url.md), [Buffer](../../engines/table-engines/special/buffer.md) и [Null](../../engines/table-engines/special/null.md). diff --git a/docs/ru/sql-reference/syntax.md b/docs/ru/sql-reference/syntax.md index 6a923fd6b58..dbbf5f92612 100644 --- a/docs/ru/sql-reference/syntax.md +++ b/docs/ru/sql-reference/syntax.md @@ -128,7 +128,7 @@ expr AS alias Например, `SELECT table_name_alias.column_name FROM table_name table_name_alias`. - В функции [CAST](sql_reference/syntax.md#type_conversion_function-cast), ключевое слово `AS` имеет другое значение. Смотрите описание функции. + В функции [CAST](../sql_reference/syntax.md#type_conversion_function-cast), ключевое слово `AS` имеет другое значение. Смотрите описание функции. - `expr` — любое выражение, которое поддерживает ClickHouse. @@ -138,7 +138,7 @@ expr AS alias Например, `SELECT "table t".column_name FROM table_name AS "table t"`. -### Примечания по использованию {#primechaniia-po-ispolzovaniiu} +### Примечания по использованию {#notes-on-usage} Синонимы являются глобальными для запроса или подзапроса, и вы можете определить синоним в любой части запроса для любого выражения. 
Например, `SELECT (1 AS n) + 2, n`. @@ -169,9 +169,9 @@ Received exception from server (version 18.14.17): Code: 184. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: Aggregate function sum(b) is found inside another aggregate function in query. ``` -В этом примере мы объявили таблицу `t` со столбцом `b`. Затем, при выборе данных, мы определили синоним `sum(b) AS b`. Поскольку синонимы глобальные, то ClickHouse заменил литерал `b` в выражении `argMax(a, b)` выражением `sum(b)`. Эта замена вызвала исключение. +В этом примере мы объявили таблицу `t` со столбцом `b`. Затем, при выборе данных, мы определили синоним `sum(b) AS b`. Поскольку синонимы глобальные, то ClickHouse заменил литерал `b` в выражении `argMax(a, b)` выражением `sum(b)`. Эта замена вызвала исключение. Можно изменить это поведение, включив настройку [prefer_column_name_to_alias](../operations/settings/settings.md#prefer_column_name_to_alias), для этого нужно установить ее в значение `1`. -## Звёздочка {#zviozdochka} +## Звёздочка {#asterisk} В запросе `SELECT`, вместо выражения может стоять звёздочка. Подробнее смотрите раздел «SELECT». @@ -180,4 +180,3 @@ Code: 184. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception Выражение представляет собой функцию, идентификатор, литерал, применение оператора, выражение в скобках, подзапрос, звёздочку. А также может содержать синоним. Список выражений - одно выражение или несколько выражений через запятую. Функции и операторы, в свою очередь, в качестве аргументов, могут иметь произвольные выражения. - diff --git a/docs/ru/sql-reference/table-functions/postgresql.md b/docs/ru/sql-reference/table-functions/postgresql.md index a8ed23db8ed..2d8afe28f1e 100644 --- a/docs/ru/sql-reference/table-functions/postgresql.md +++ b/docs/ru/sql-reference/table-functions/postgresql.md @@ -5,43 +5,46 @@ toc_title: postgresql # postgresql {#postgresql} -Позволяет выполнять запросы `SELECT` над данными, хранящимися на удалённом PostgreSQL сервере. +Позволяет выполнять запросы `SELECT` и `INSERT` над таблицами удаленной БД PostgreSQL. **Синтаксис** + ``` sql -postgresql('host:port', 'database', 'table', 'user', 'password') +postgresql('host:port', 'database', 'table', 'user', 'password'[, `schema`]) ``` -**Параметры** +**Аргументы** - `host:port` — адрес сервера PostgreSQL. - - `database` — имя базы данных на удалённом сервере. - - `table` — имя таблицы на удалённом сервере. - - `user` — пользователь PostgreSQL. - - `password` — пароль пользователя. - - -SELECT запросы на стороне PostgreSQL выполняются как `COPY (SELECT ...) TO STDOUT` внутри транзакции PostgreSQL только на чтение с коммитом после каждого `SELECT` запроса. - -Простые условия для `WHERE` такие как `=, !=, >, >=, <, <=, IN` исполняются на стороне PostgreSQL сервера. - -Все операции объединения, аггрегации, сортировки, условия `IN [ array ]` и ограничения `LIMIT` выполняются на стороне ClickHouse только после того как запрос к PostgreSQL закончился. - -INSERT запросы на стороне PostgreSQL выполняются как `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` внутри PostgreSQL транзакции с автоматическим коммитом после каждого `INSERT` запроса. - -PostgreSQL массивы конвертируются в массивы ClickHouse. -Будьте осторожны в PostgreSQL массивы созданные как type_name[], являются многомерными и могут содержать в себе разное количество измерений в разных строках одной таблицы, внутри ClickHouse допустипы только многомерные массивы с одинаковым кол-вом измерений во всех строках таблицы. 
+
+- `schema` — имя схемы, если не используется схема по умолчанию. Необязательный аргумент.

**Возвращаемое значение**

-Объект таблицы с теми же столбцами, что и в исходной таблице PostgreSQL.
+Таблица с теми же столбцами, что и в исходной таблице PostgreSQL.

!!! info "Примечание"
-В запросах `INSERT` для того чтобы отличить табличную функцию `postgresql(...)` от таблицы со списком имен столбцов вы должны указывать ключевые слова `FUNCTION` или `TABLE FUNCTION`. See examples below.
+    В запросах `INSERT` для того чтобы отличить табличную функцию `postgresql(...)` от таблицы со списком имен столбцов вы должны указывать ключевые слова `FUNCTION` или `TABLE FUNCTION`. См. примеры ниже.
+
+## Особенности реализации {#implementation-details}
+
+Запросы `SELECT` на стороне PostgreSQL выполняются как `COPY (SELECT ...) TO STDOUT` внутри транзакции PostgreSQL только на чтение с коммитом после каждого запроса `SELECT`.
+
+Простые условия для `WHERE` такие как `=`, `!=`, `>`, `>=`, `<`, `<=` и `IN` исполняются на стороне PostgreSQL сервера.
+
+Все операции объединения, агрегации, сортировки, условия `IN [ array ]` и ограничения `LIMIT` выполняются на стороне ClickHouse только после того, как запрос к PostgreSQL закончился.
+
+Запросы `INSERT` на стороне PostgreSQL выполняются как `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` внутри PostgreSQL транзакции с автоматическим коммитом после каждого запроса `INSERT`.
+
+PostgreSQL массивы конвертируются в массивы ClickHouse.
+
+!!! info "Примечание"
+    Будьте внимательны, в PostgreSQL массивы, созданные как `type_name[]`, являются многомерными и могут содержать в себе разное количество измерений в разных строках одной таблицы. Внутри ClickHouse допустимы только многомерные массивы с одинаковым количеством измерений во всех строках таблицы.
+
+При использовании словаря PostgreSQL поддерживается приоритет реплик. Чем больше номер реплики, тем ниже ее приоритет. Наивысший приоритет у реплики с номером `0`.
**Примеры** @@ -58,14 +61,14 @@ PRIMARY KEY (int_id)); CREATE TABLE -postgres=# insert into test (int_id, str, "float") VALUES (1,'test',2); +postgres=# INSERT INTO test (int_id, str, "float") VALUES (1,'test',2); INSERT 0 1 -postgresql> select * from test; - int_id | int_nullable | float | str | float_nullable ---------+--------------+-------+------+---------------- - 1 | | 2 | test | -(1 row) +postgresql> SELECT * FROM test; + int_id | int_nullable | float | str | float_nullable + --------+--------------+-------+------+---------------- + 1 | | 2 | test | + (1 row) ``` Получение данных в ClickHouse: @@ -80,7 +83,7 @@ SELECT * FROM postgresql('localhost:5432', 'test', 'test', 'postgresql_user', 'p └────────┴──────────────┴───────┴──────┴────────────────┘ ``` -Вставка: +Вставка данных: ```sql INSERT INTO TABLE FUNCTION postgresql('localhost:5432', 'test', 'test', 'postgrsql_user', 'password') (int_id, float) VALUES (2, 3); @@ -94,7 +97,24 @@ SELECT * FROM postgresql('localhost:5432', 'test', 'test', 'postgresql_user', 'p └────────┴──────────────┴───────┴──────┴────────────────┘ ``` -**Смотрите также** +Using Non-default Schema: -- [Движок таблиц ‘PostgreSQL’](../../sql-reference/table-functions/postgresql.md) +```text +postgres=# CREATE SCHEMA "nice.schema"; + +postgres=# CREATE TABLE "nice.schema"."nice.table" (a integer); + +postgres=# INSERT INTO "nice.schema"."nice.table" SELECT i FROM generate_series(0, 99) as t(i) +``` + +```sql +CREATE TABLE pg_table_schema_with_dots (a UInt32) + ENGINE PostgreSQL('localhost:5432', 'clickhouse', 'nice.table', 'postgrsql_user', 'password', 'nice.schema'); +``` + +**См. также** + +- [Движок таблиц PostgreSQL](../../sql-reference/table-functions/postgresql.md) - [Использование PostgreSQL как источника данных для внешнего словаря](../../sql-reference/table-functions/postgresql.md#dicts-external_dicts_dict_sources-postgresql) + +[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/table-functions/postgresql/) diff --git a/docs/ru/sql-reference/table-functions/s3.md b/docs/ru/sql-reference/table-functions/s3.md index 1d3fc8cfdb7..e062e59c67c 100644 --- a/docs/ru/sql-reference/table-functions/s3.md +++ b/docs/ru/sql-reference/table-functions/s3.md @@ -18,7 +18,7 @@ s3(path, [aws_access_key_id, aws_secret_access_key,] format, structure, [compres - `path` — URL-адрес бакета с указанием пути к файлу. Поддерживает следующие подстановочные знаки в режиме "только чтение": `*, ?, {abc,def} и {N..M}` где `N, M` — числа, `'abc', 'def'` — строки. Подробнее смотри [здесь](../../engines/table-engines/integrations/s3.md#wildcards-in-path). - `format` — [формат](../../interfaces/formats.md#formats) файла. - `structure` — cтруктура таблицы. Формат `'column1_name column1_type, column2_name column2_type, ...'`. -- `compression` — автоматически обнаруживает сжатие по расширению файла. Возможные значения: none, gzip/gz, brotli/br, xz/LZMA, zstd/zst. Необязательный параметр. +- `compression` — автоматически обнаруживает сжатие по расширению файла. Возможные значения: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. Необязательный параметр. 
**Возвращаемые значения** diff --git a/docs/tools/README.md b/docs/tools/README.md index 0a6c41d8089..4340561fa57 100644 --- a/docs/tools/README.md +++ b/docs/tools/README.md @@ -51,5 +51,5 @@ The easiest way to see the result is to use `--livereload=8888` argument of buil At the moment there’s no easy way to do just that, but you can consider: -- To hit the “Watch” button on top of GitHub web interface to know as early as possible, even during pull request. Alternative to this is `#github-activity` channel of [public ClickHouse Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-nwwakmk4-xOJ6cdy0sJC3It8j348~IA). +- To hit the “Watch” button on top of GitHub web interface to know as early as possible, even during pull request. Alternative to this is `#github-activity` channel of [public ClickHouse Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-qfort0u8-TWqK4wIP0YSdoDE0btKa1w). - Some search engines allow to subscribe on specific website changes via email and you can opt-in for that for https://clickhouse.tech. diff --git a/docs/tools/amp.py b/docs/tools/amp.py index b08b58d3cba..22417407946 100644 --- a/docs/tools/amp.py +++ b/docs/tools/amp.py @@ -62,7 +62,6 @@ def build_amp(lang, args, cfg): for root, _, filenames in os.walk(site_temp): if 'index.html' in filenames: paths.append(prepare_amp_html(lang, args, root, site_temp, main_site_dir)) - test.test_amp(paths, lang) logging.info(f'Finished building AMP version for {lang}') diff --git a/docs/tools/blog.py b/docs/tools/blog.py index c3261f61d4d..d0f2496f914 100644 --- a/docs/tools/blog.py +++ b/docs/tools/blog.py @@ -40,7 +40,7 @@ def build_for_lang(lang, args): site_names = { 'en': 'ClickHouse Blog', - 'ru': 'Блог ClickHouse ' + 'ru': 'Блог ClickHouse' } assert len(site_names) == len(languages) @@ -62,7 +62,7 @@ def build_for_lang(lang, args): strict=True, theme=theme_cfg, nav=blog_nav, - copyright='©2016–2020 Yandex LLC', + copyright='©2016–2021 Yandex LLC', use_directory_urls=True, repo_name='ClickHouse/ClickHouse', repo_url='https://github.com/ClickHouse/ClickHouse/', diff --git a/docs/tools/build.py b/docs/tools/build.py index dfb9661c326..61112d5a4f5 100755 --- a/docs/tools/build.py +++ b/docs/tools/build.py @@ -65,8 +65,6 @@ def build_for_lang(lang, args): languages = { 'en': 'English', 'zh': '中文', - 'es': 'Español', - 'fr': 'Français', 'ru': 'Русский', 'ja': '日本語' } @@ -74,8 +72,6 @@ def build_for_lang(lang, args): site_names = { 'en': 'ClickHouse %s Documentation', 'zh': 'ClickHouse文档 %s', - 'es': 'Documentación de ClickHouse %s', - 'fr': 'Documentation ClickHouse %s', 'ru': 'Документация ClickHouse %s', 'ja': 'ClickHouseドキュメント %s' } @@ -98,7 +94,7 @@ def build_for_lang(lang, args): site_dir=site_dir, strict=True, theme=theme_cfg, - copyright='©2016–2020 Yandex LLC', + copyright='©2016–2021 Yandex LLC', use_directory_urls=True, repo_name='ClickHouse/ClickHouse', repo_url='https://github.com/ClickHouse/ClickHouse/', @@ -183,7 +179,7 @@ if __name__ == '__main__': website_dir = os.path.join(src_dir, 'website') arg_parser = argparse.ArgumentParser() - arg_parser.add_argument('--lang', default='en,es,fr,ru,zh,ja') + arg_parser.add_argument('--lang', default='en,ru,zh,ja') arg_parser.add_argument('--blog-lang', default='en,ru') arg_parser.add_argument('--docs-dir', default='.') arg_parser.add_argument('--theme-dir', default=website_dir) diff --git a/docs/tools/make_links.sh b/docs/tools/make_links.sh index c1194901f8f..801086178bf 100755 --- a/docs/tools/make_links.sh +++ b/docs/tools/make_links.sh @@ -8,7 
+8,7 @@ BASE_DIR=$(dirname $(readlink -f $0)) function do_make_links() { set -x - langs=(en es zh fr ru ja) + langs=(en zh ru ja) src_file="$1" for lang in "${langs[@]}" do diff --git a/docs/tools/nav.py b/docs/tools/nav.py index 291797a1633..db64d1ba404 100644 --- a/docs/tools/nav.py +++ b/docs/tools/nav.py @@ -31,7 +31,16 @@ def build_nav_entry(root, args): result_items.append((prio, title, payload)) elif filename.endswith('.md'): path = os.path.join(root, filename) - meta, content = util.read_md_file(path) + + meta = '' + content = '' + + try: + meta, content = util.read_md_file(path) + except: + print('Error in file: {}'.format(path)) + raise + path = path.split('/', 2)[-1] title = meta.get('toc_title', find_first_header(content)) if title: diff --git a/docs/tools/single_page.py b/docs/tools/single_page.py index b88df5a03cb..a1e650d3ad3 100644 --- a/docs/tools/single_page.py +++ b/docs/tools/single_page.py @@ -109,7 +109,8 @@ def build_single_page_version(lang, args, nav, cfg): extra['single_page'] = True extra['is_amp'] = False - with open(os.path.join(args.docs_dir, lang, 'single.md'), 'w') as single_md: + single_md_path = os.path.join(args.docs_dir, lang, 'single.md') + with open(single_md_path, 'w') as single_md: concatenate(lang, args.docs_dir, single_md, nav) with util.temp_dir() as site_temp: @@ -221,3 +222,7 @@ def build_single_page_version(lang, args, nav, cfg): subprocess.check_call(' '.join(create_pdf_command), shell=True) logging.info(f'Finished building single page version for {lang}') + + if os.path.exists(single_md_path): + os.unlink(single_md_path) + \ No newline at end of file diff --git a/docs/tools/test.py b/docs/tools/test.py index 00d1d47137f..ada4df29644 100755 --- a/docs/tools/test.py +++ b/docs/tools/test.py @@ -3,34 +3,9 @@ import logging import os import sys - import bs4 - -import logging -import os import subprocess -import bs4 - - -def test_amp(paths, lang): - try: - # Get latest amp validator version - subprocess.check_call('amphtml-validator --help', - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - shell=True) - except subprocess.CalledProcessError: - subprocess.check_call('npm i -g amphtml-validator', stderr=subprocess.DEVNULL, shell=True) - - paths = ' '.join(paths) - command = f'amphtml-validator {paths}' - try: - subprocess.check_output(command, shell=True).decode('utf-8') - except subprocess.CalledProcessError: - logging.error(f'Invalid AMP for {lang}') - raise - def test_template(template_path): if template_path.endswith('amp.html'): diff --git a/docs/tools/website.py b/docs/tools/website.py index 6927fbd87bb..f0346de5c94 100644 --- a/docs/tools/website.py +++ b/docs/tools/website.py @@ -155,10 +155,6 @@ def build_website(args): os.path.join(args.src_dir, 'utils', 'list-versions', 'version_date.tsv'), os.path.join(args.output_dir, 'data', 'version_date.tsv')) - shutil.copy2( - os.path.join(args.website_dir, 'js', 'embedd.min.js'), - os.path.join(args.output_dir, 'js', 'embedd.min.js')) - for root, _, filenames in os.walk(args.output_dir): for filename in filenames: if filename == 'main.html': diff --git a/docs/zh/commercial/cloud.md b/docs/zh/commercial/cloud.md index c74ffa93e9a..e0a297f51c8 100644 --- a/docs/zh/commercial/cloud.md +++ b/docs/zh/commercial/cloud.md @@ -31,7 +31,7 @@ toc_title: 云 ## 阿里云 {#alibaba-cloud} -阿里云的 ClickHouse 托管服务 [中国站](https://www.aliyun.com/product/clickhouse) (国际站于2021年5月初开放) 提供以下主要功能: +[阿里云的 ClickHouse 托管服务](https://www.alibabacloud.com/zh/product/clickhouse) 提供以下主要功能: - 基于阿里飞天分布式系统的高可靠云盘存储引擎 - 
按需扩容,无需手动进行数据搬迁 diff --git a/docs/zh/development/build.md b/docs/zh/development/build.md index 1aa5c1c97b7..01e0740bfa4 100644 --- a/docs/zh/development/build.md +++ b/docs/zh/development/build.md @@ -35,28 +35,12 @@ sudo apt-get install git cmake ninja-build 或cmake3而不是旧系统上的cmake。 或者在早期版本的系统中用 cmake3 替代 cmake -## 安装 GCC 10 {#an-zhuang-gcc-10} +## 安装 Clang -有几种方法可以做到这一点。 +On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/)) -### 安装 PPA 包 {#an-zhuang-ppa-bao} - -``` bash -sudo apt-get install software-properties-common -sudo apt-add-repository ppa:ubuntu-toolchain-r/test -sudo apt-get update -sudo apt-get install gcc-10 g++-10 -``` - -### 源码安装 gcc {#yuan-ma-an-zhuang-gcc} - -请查看 [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh) - -## 使用 GCC 10 来编译 {#shi-yong-gcc-10-lai-bian-yi} - -``` bash -export CC=gcc-10 -export CXX=g++-10 +```bash +sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" ``` ## 拉取 ClickHouse 源码 {#la-qu-clickhouse-yuan-ma-1} diff --git a/docs/zh/development/developer-instruction.md b/docs/zh/development/developer-instruction.md index 53aab5dc086..04950c11521 100644 --- a/docs/zh/development/developer-instruction.md +++ b/docs/zh/development/developer-instruction.md @@ -123,17 +123,13 @@ ClickHouse使用多个外部库进行构建。大多数外部库不需要单独 # C++ 编译器 {#c-bian-yi-qi} -GCC编译器从版本9开始,以及Clang版本\>=8都可支持构建ClickHouse。 +We support clang starting from version 11. -Yandex官方当前使用GCC构建ClickHouse,因为它生成的机器代码性能较好(根据测评,最多可以相差几个百分点)。Clang通常可以更加便捷的开发。我们的持续集成(CI)平台会运行大约十二种构建组合的检查。 +On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/)) -在Ubuntu上安装GCC,请执行:`sudo apt install gcc g++` - -请使用`gcc --version`查看gcc的版本。如果gcc版本低于9,请参考此处的指示:https://clickhouse.tech/docs/zh/development/build/#an-zhuang-gcc-10 。 - -在Mac OS X上安装GCC,请执行:`brew install gcc` - -如果您决定使用Clang,还可以同时安装 `libc++`以及`lld`,前提是您也熟悉它们。此外,也推荐使用`ccache`。 +```bash +sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" +``` # 构建的过程 {#gou-jian-de-guo-cheng} @@ -146,7 +142,7 @@ Yandex官方当前使用GCC构建ClickHouse,因为它生成的机器代码性 在`build`目录下,通过运行CMake配置构建。 在第一次运行之前,请定义用于指定编译器的环境变量(本示例中为gcc 9 编译器)。 - export CC=gcc-10 CXX=g++-10 + export CC=clang CXX=clang++ cmake .. `CC`变量指代C的编译器(C Compiler的缩写),而`CXX`变量指代要使用哪个C++编译器进行编译。 diff --git a/docs/zh/development/style.md b/docs/zh/development/style.md index c8e883920dd..bb9bfde7b9b 100644 --- a/docs/zh/development/style.md +++ b/docs/zh/development/style.md @@ -696,7 +696,7 @@ auto s = std::string{"Hello"}; **2.** 语言: C++20. -**3.** 编译器: `gcc`。 此时(2020年08月),代码使用9.3版编译。(它也可以使用`clang 8` 编译) +**3.** 编译器: `clang`。 此时(2021年03月),代码使用11版编译。(它也可以使用`gcc` 编译 but it is not suitable for production) 使用标准库 (`libc++`)。 diff --git a/docs/zh/engines/table-engines/integrations/odbc.md b/docs/zh/engines/table-engines/integrations/odbc.md index 1264efeaa41..767c32cc438 100644 --- a/docs/zh/engines/table-engines/integrations/odbc.md +++ b/docs/zh/engines/table-engines/integrations/odbc.md @@ -7,11 +7,11 @@ toc_title: ODBC # ODBC {#table-engine-odbc} -允许ClickHouse通过以下方式连接到外部数据库 [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity). +允许ClickHouse通过[ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity)方式连接到外部数据库. -为了安全地实现ODBC连接,ClickHouse使用单独的程序 `clickhouse-odbc-bridge`. 如果直接从ODBC驱动程序加载 `clickhouse-server`,驱动程序问题可能会导致ClickHouse服务器崩溃。 ClickHouse自动启动 `clickhouse-odbc-bridge` 当它是必需的。 ODBC桥程序是从相同的软件包作为安装 `clickhouse-server`. 
+为了安全地实现ODBC连接,ClickHouse使用了一个独立程序 `clickhouse-odbc-bridge`. 如果ODBC驱动程序是直接从 `clickhouse-server`中加载的,那么驱动问题可能会导致ClickHouse服务崩溃。 当有需要时,ClickHouse会自动启动 `clickhouse-odbc-bridge`。 ODBC桥梁程序与`clickhouse-server`来自相同的安装包. -该引擎支持 [可为空](../../../sql-reference/data-types/nullable.md) 数据类型。 +该引擎支持 [可为空](../../../sql-reference/data-types/nullable.md) 的数据类型。 ## 创建表 {#creating-a-table} @@ -25,14 +25,14 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ENGINE = ODBC(connection_settings, external_database, external_table) ``` -请参阅的详细说明 [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query) 查询。 +详情请见 [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query) 查询。 表结构可以与源表结构不同: - 列名应与源表中的列名相同,但您可以按任何顺序使用其中的一些列。 -- 列类型可能与源表中的列类型不同。 ClickHouse尝试 [投](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) ClickHouse数据类型的值。 +- 列类型可能与源表中的列类型不同。 ClickHouse尝试将数值[映射](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) 到ClickHouse的数据类型。 -**发动机参数** +**引擎参数** - `connection_settings` — Name of the section with connection settings in the `odbc.ini` 文件 - `external_database` — Name of a database in an external DBMS. @@ -40,13 +40,13 @@ ENGINE = ODBC(connection_settings, external_database, external_table) ## 用法示例 {#usage-example} -**通过ODBC从本地MySQL安装中检索数据** +**通过ODBC从本地安装的MySQL中检索数据** -此示例检查Ubuntu Linux18.04和MySQL服务器5.7。 +本示例针对Ubuntu Linux18.04和MySQL服务器5.7进行检查。 -确保安装了unixODBC和MySQL连接器。 +请确保安装了unixODBC和MySQL连接器。 -默认情况下(如果从软件包安装),ClickHouse以用户身份启动 `clickhouse`. 因此,您需要在MySQL服务器中创建和配置此用户。 +默认情况下(如果从软件包安装),ClickHouse以用户`clickhouse`的身份启动 . 因此,您需要在MySQL服务器中创建和配置此用户。 ``` bash $ sudo mysql @@ -57,7 +57,7 @@ mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse'; mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION; ``` -然后配置连接 `/etc/odbc.ini`. +然后在`/etc/odbc.ini`中配置连接 . 
``` bash $ cat /etc/odbc.ini @@ -70,7 +70,7 @@ USERNAME = clickhouse PASSWORD = clickhouse ``` -您可以使用 `isql` unixodbc安装中的实用程序。 +您可以从安装的unixodbc中使用 `isql` 实用程序来检查连接情况。 ``` bash $ isql -v mysqlconn diff --git a/docs/zh/getting-started/example-datasets/ontime.md b/docs/zh/getting-started/example-datasets/ontime.md index 3921f71fc7e..6d888b2196c 100644 --- a/docs/zh/getting-started/example-datasets/ontime.md +++ b/docs/zh/getting-started/example-datasets/ontime.md @@ -29,126 +29,127 @@ done 创建表结构: ``` sql -CREATE TABLE `ontime` ( - `Year` UInt16, - `Quarter` UInt8, - `Month` UInt8, - `DayofMonth` UInt8, - `DayOfWeek` UInt8, - `FlightDate` Date, - `UniqueCarrier` FixedString(7), - `AirlineID` Int32, - `Carrier` FixedString(2), - `TailNum` String, - `FlightNum` String, - `OriginAirportID` Int32, - `OriginAirportSeqID` Int32, - `OriginCityMarketID` Int32, - `Origin` FixedString(5), - `OriginCityName` String, - `OriginState` FixedString(2), - `OriginStateFips` String, - `OriginStateName` String, - `OriginWac` Int32, - `DestAirportID` Int32, - `DestAirportSeqID` Int32, - `DestCityMarketID` Int32, - `Dest` FixedString(5), - `DestCityName` String, - `DestState` FixedString(2), - `DestStateFips` String, - `DestStateName` String, - `DestWac` Int32, - `CRSDepTime` Int32, - `DepTime` Int32, - `DepDelay` Int32, - `DepDelayMinutes` Int32, - `DepDel15` Int32, - `DepartureDelayGroups` String, - `DepTimeBlk` String, - `TaxiOut` Int32, - `WheelsOff` Int32, - `WheelsOn` Int32, - `TaxiIn` Int32, - `CRSArrTime` Int32, - `ArrTime` Int32, - `ArrDelay` Int32, - `ArrDelayMinutes` Int32, - `ArrDel15` Int32, - `ArrivalDelayGroups` Int32, - `ArrTimeBlk` String, - `Cancelled` UInt8, - `CancellationCode` FixedString(1), - `Diverted` UInt8, - `CRSElapsedTime` Int32, - `ActualElapsedTime` Int32, - `AirTime` Int32, - `Flights` Int32, - `Distance` Int32, - `DistanceGroup` UInt8, - `CarrierDelay` Int32, - `WeatherDelay` Int32, - `NASDelay` Int32, - `SecurityDelay` Int32, - `LateAircraftDelay` Int32, - `FirstDepTime` String, - `TotalAddGTime` String, - `LongestAddGTime` String, - `DivAirportLandings` String, - `DivReachedDest` String, - `DivActualElapsedTime` String, - `DivArrDelay` String, - `DivDistance` String, - `Div1Airport` String, - `Div1AirportID` Int32, - `Div1AirportSeqID` Int32, - `Div1WheelsOn` String, - `Div1TotalGTime` String, - `Div1LongestGTime` String, - `Div1WheelsOff` String, - `Div1TailNum` String, - `Div2Airport` String, - `Div2AirportID` Int32, - `Div2AirportSeqID` Int32, - `Div2WheelsOn` String, - `Div2TotalGTime` String, - `Div2LongestGTime` String, - `Div2WheelsOff` String, - `Div2TailNum` String, - `Div3Airport` String, - `Div3AirportID` Int32, - `Div3AirportSeqID` Int32, - `Div3WheelsOn` String, - `Div3TotalGTime` String, - `Div3LongestGTime` String, - `Div3WheelsOff` String, - `Div3TailNum` String, - `Div4Airport` String, - `Div4AirportID` Int32, - `Div4AirportSeqID` Int32, - `Div4WheelsOn` String, - `Div4TotalGTime` String, - `Div4LongestGTime` String, - `Div4WheelsOff` String, - `Div4TailNum` String, - `Div5Airport` String, - `Div5AirportID` Int32, - `Div5AirportSeqID` Int32, - `Div5WheelsOn` String, - `Div5TotalGTime` String, - `Div5LongestGTime` String, - `Div5WheelsOff` String, - `Div5TailNum` String +CREATE TABLE `ontime` +( + `Year` UInt16, + `Quarter` UInt8, + `Month` UInt8, + `DayofMonth` UInt8, + `DayOfWeek` UInt8, + `FlightDate` Date, + `Reporting_Airline` String, + `DOT_ID_Reporting_Airline` Int32, + `IATA_CODE_Reporting_Airline` String, + `Tail_Number` Int32, + 
`Flight_Number_Reporting_Airline` String, + `OriginAirportID` Int32, + `OriginAirportSeqID` Int32, + `OriginCityMarketID` Int32, + `Origin` FixedString(5), + `OriginCityName` String, + `OriginState` FixedString(2), + `OriginStateFips` String, + `OriginStateName` String, + `OriginWac` Int32, + `DestAirportID` Int32, + `DestAirportSeqID` Int32, + `DestCityMarketID` Int32, + `Dest` FixedString(5), + `DestCityName` String, + `DestState` FixedString(2), + `DestStateFips` String, + `DestStateName` String, + `DestWac` Int32, + `CRSDepTime` Int32, + `DepTime` Int32, + `DepDelay` Int32, + `DepDelayMinutes` Int32, + `DepDel15` Int32, + `DepartureDelayGroups` String, + `DepTimeBlk` String, + `TaxiOut` Int32, + `WheelsOff` Int32, + `WheelsOn` Int32, + `TaxiIn` Int32, + `CRSArrTime` Int32, + `ArrTime` Int32, + `ArrDelay` Int32, + `ArrDelayMinutes` Int32, + `ArrDel15` Int32, + `ArrivalDelayGroups` Int32, + `ArrTimeBlk` String, + `Cancelled` UInt8, + `CancellationCode` FixedString(1), + `Diverted` UInt8, + `CRSElapsedTime` Int32, + `ActualElapsedTime` Int32, + `AirTime` Nullable(Int32), + `Flights` Int32, + `Distance` Int32, + `DistanceGroup` UInt8, + `CarrierDelay` Int32, + `WeatherDelay` Int32, + `NASDelay` Int32, + `SecurityDelay` Int32, + `LateAircraftDelay` Int32, + `FirstDepTime` String, + `TotalAddGTime` String, + `LongestAddGTime` String, + `DivAirportLandings` String, + `DivReachedDest` String, + `DivActualElapsedTime` String, + `DivArrDelay` String, + `DivDistance` String, + `Div1Airport` String, + `Div1AirportID` Int32, + `Div1AirportSeqID` Int32, + `Div1WheelsOn` String, + `Div1TotalGTime` String, + `Div1LongestGTime` String, + `Div1WheelsOff` String, + `Div1TailNum` String, + `Div2Airport` String, + `Div2AirportID` Int32, + `Div2AirportSeqID` Int32, + `Div2WheelsOn` String, + `Div2TotalGTime` String, + `Div2LongestGTime` String, + `Div2WheelsOff` String, + `Div2TailNum` String, + `Div3Airport` String, + `Div3AirportID` Int32, + `Div3AirportSeqID` Int32, + `Div3WheelsOn` String, + `Div3TotalGTime` String, + `Div3LongestGTime` String, + `Div3WheelsOff` String, + `Div3TailNum` String, + `Div4Airport` String, + `Div4AirportID` Int32, + `Div4AirportSeqID` Int32, + `Div4WheelsOn` String, + `Div4TotalGTime` String, + `Div4LongestGTime` String, + `Div4WheelsOff` String, + `Div4TailNum` String, + `Div5Airport` String, + `Div5AirportID` Int32, + `Div5AirportSeqID` Int32, + `Div5WheelsOn` String, + `Div5TotalGTime` String, + `Div5LongestGTime` String, + `Div5WheelsOff` String, + `Div5TailNum` String ) ENGINE = MergeTree -PARTITION BY Year -ORDER BY (Carrier, FlightDate) -SETTINGS index_granularity = 8192; + PARTITION BY Year + ORDER BY (IATA_CODE_Reporting_Airline, FlightDate) + SETTINGS index_granularity = 8192; ``` 加载数据: ``` bash -$ for i in *.zip; do echo $i; unzip -cq $i '*.csv' | sed 's/\.00//g' | clickhouse-client --host=example-perftest01j --query="INSERT INTO ontime FORMAT CSVWithNames"; done +ls -1 *.zip | xargs -I{} -P $(nproc) bash -c "echo {}; unzip -cq {} '*.csv' | sed 's/\.00//g' | clickhouse-client --input_format_with_names_use_header=0 --query='INSERT INTO ontime FORMAT CSVWithNames'" ``` ## 下载预处理好的分区数据 {#xia-zai-yu-chu-li-hao-de-fen-qu-shu-ju} @@ -212,7 +213,7 @@ LIMIT 10; Q4. 
查询2007年各航空公司延误超过10分钟以上的次数 ``` sql -SELECT Carrier, count(*) +SELECT IATA_CODE_Reporting_Airline AS Carrier, count(*) FROM ontime WHERE DepDelay>10 AND Year=2007 GROUP BY Carrier @@ -226,29 +227,29 @@ SELECT Carrier, c, c2, c*100/c2 as c3 FROM ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year=2007 GROUP BY Carrier -) +) q JOIN ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c2 FROM ontime WHERE Year=2007 GROUP BY Carrier -) USING Carrier +) qq USING Carrier ORDER BY c3 DESC; ``` 更好的查询版本: ``` sql -SELECT Carrier, avg(DepDelay>10)*100 AS c3 +SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3 FROM ontime WHERE Year=2007 GROUP BY Carrier @@ -262,29 +263,29 @@ SELECT Carrier, c, c2, c*100/c2 as c3 FROM ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year>=2000 AND Year<=2008 GROUP BY Carrier -) +) q JOIN ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c2 FROM ontime WHERE Year>=2000 AND Year<=2008 GROUP BY Carrier -) USING Carrier +) qq USING Carrier ORDER BY c3 DESC; ``` 更好的查询版本: ``` sql -SELECT Carrier, avg(DepDelay>10)*100 AS c3 +SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3 FROM ontime WHERE Year>=2000 AND Year<=2008 GROUP BY Carrier @@ -303,7 +304,7 @@ FROM from ontime WHERE DepDelay>10 GROUP BY Year -) +) q JOIN ( select @@ -311,7 +312,7 @@ JOIN count(*) as c2 from ontime GROUP BY Year -) USING (Year) +) qq USING (Year) ORDER BY Year; ``` @@ -346,7 +347,7 @@ Q10. ``` sql SELECT - min(Year), max(Year), Carrier, count(*) AS cnt, + min(Year), max(Year), IATA_CODE_Reporting_Airline AS Carrier, count(*) AS cnt, sum(ArrDelayMinutes>30) AS flights_delayed, round(sum(ArrDelayMinutes>30)/count(*),2) AS rate FROM ontime diff --git a/docs/zh/operations/backup.md b/docs/zh/operations/backup.md index 1b1993e3ae6..6d517e6ccb3 100644 --- a/docs/zh/operations/backup.md +++ b/docs/zh/operations/backup.md @@ -7,37 +7,37 @@ toc_title: "\u6570\u636E\u5907\u4EFD" # 数据备份 {#data-backup} -尽管[副本](../engines/table-engines/mergetree-family/replication.md) 可以预防硬件错误带来的数据丢失, 但是它不能防止人为操作的错误: 意外删除数据, 删除错误的 table 或者删除错误 cluster 上的 table, 可以导致错误数据处理错误或者数据损坏的 bugs. 这类意外可能会影响所有的副本. ClickHouse 有内建的保障措施可以预防一些错误 — 例如, 默认情况下[您不能使用类似MergeTree的引擎删除包含超过50Gb数据的表](server-configuration-parameters/settings.md#max-table-size-to-drop). 但是,这些保障措施不能涵盖所有可能的情况,并且可以规避。 +尽管 [副本] (../engines/table-engines/mergetree-family/replication.md) 可以提供针对硬件的错误防护, 但是它不能预防人为操作失误: 数据的意外删除, 错误表的删除或者错误集群上表的删除, 以及导致错误数据处理或者数据损坏的软件bug. 在很多案例中,这类意外可能会影响所有的副本. ClickHouse 有内置的保护措施可以预防一些错误 — 例如, 默认情况下 [不能人工删除使用带有MergeTree引擎且包含超过50Gb数据的表] (server-configuration-parameters/settings.md#max-table-size-to-drop). 但是,这些保护措施不能覆盖所有可能情况,并且这些措施可以被绕过。 -为了有效地减少可能的人为错误,您应该 **提前**准备备份和还原数据的策略. +为了有效地减少可能的人为错误,您应该 **提前** 仔细的准备备份和数据还原的策略. -不同公司有不同的可用资源和业务需求,因此没有适合各种情况的ClickHouse备份和恢复通用解决方案。 适用于 1GB 的数据的方案可能并不适用于几十 PB 数据的情况。 有多种可能的并有自己优缺点的方法,这将在下面讨论。 好的主意是同时结合使用多种方法而不是仅使用一种,这样可以弥补不同方法各自的缺点。 +不同公司有不同的可用资源和业务需求,因此不存在一个通用的解决方案可以应对各种情况下的ClickHouse备份和恢复。 适用于 1GB 数据的方案可能并不适用于几十 PB 数据的情况。 有多种具备各自优缺点的可能方法,将在下面对其进行讨论。最好使用几种方法而不是仅仅使用一种方法来弥补它们的各种缺点。。 !!! 
note "注"
- 请记住,如果您备份了某些内容并且从未尝试过还原它,那么当您实际需要它时(或者至少需要比业务能够容忍的时间更长),恢复可能无法正常工作。 因此,无论您选择哪种备份方法,请确保自动还原过程,并定期在备用ClickHouse群集上练习。
+ 需要注意的是,如果您备份了某些内容并且从未尝试过还原它,那么当您实际需要它时可能无法正常恢复(或者至少需要的时间比业务能够容忍的时间更长)。 因此,无论您选择哪种备份方法,请确保自动还原过程,并定期在备用ClickHouse群集上演练。

-## 将源数据复制到其他地方 {#duplicating-source-data-somewhere-else}
+## 将源数据复制到其它地方 {#duplicating-source-data-somewhere-else}

-通常被聚集到ClickHouse的数据是通过某种持久队列传递的,例如 [Apache Kafka](https://kafka.apache.org). 在这种情况下,可以配置一组额外的订阅服务器,这些订阅服务器将在写入ClickHouse时读取相同的数据流,并将其存储在冷存储中。 大多数公司已经有一些默认的推荐冷存储,可能是对象存储或分布式文件系统,如 [HDFS](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html).
+通常摄入到ClickHouse的数据是通过某种持久队列传递的,例如 [Apache Kafka](https://kafka.apache.org). 在这种情况下,可以配置一组额外的订阅服务器,这些订阅服务器将在写入ClickHouse时读取相同的数据流,并将其存储在冷存储中。 大多数公司已经有一些默认推荐的冷存储,可能是对象存储或分布式文件系统,如 [HDFS](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html).

## 文件系统快照 {#filesystem-snapshots}

-某些本地文件系统提供快照功能(例如, [ZFS](https://en.wikipedia.org/wiki/ZFS)),但它们可能不是提供实时查询的最佳选择。 一个可能的解决方案是使用这种文件系统创建额外的副本,并将它们从 [分布](../engines/table-engines/special/distributed.md) 用于以下目的的表 `SELECT` 查询。 任何修改数据的查询都无法访问此类副本上的快照。 作为奖励,这些副本可能具有特殊的硬件配置,每个服务器附加更多的磁盘,这将是经济高效的。
+某些本地文件系统提供快照功能(例如, [ZFS](https://en.wikipedia.org/wiki/ZFS)),但它们可能不是提供实时查询的最佳选择。 一个可能的解决方案是使用这种文件系统创建额外的副本,并将它们与用于`SELECT` 查询的 [分布式](../engines/table-engines/special/distributed.md) 表分离。 任何修改数据的查询都无法访问此类副本上的快照。 作为回报,这些副本可能具有特殊的硬件配置,每个服务器附加更多的磁盘,这将是经济高效的。

## clickhouse-copier {#clickhouse-copier}

-[clickhouse-copier](utilities/clickhouse-copier.md) 是一个多功能工具,最初创建用于重新分片pb大小的表。 因为它可以在ClickHouse表和集群之间可靠地复制数据,所以它还可用于备份和还原数据。
+[clickhouse-copier](utilities/clickhouse-copier.md) 是一个多功能工具,最初创建它是为了用于重新切分pb大小的表。 因为它能够在ClickHouse表和集群之间可靠地复制数据,所以它也可用于备份和还原数据。

对于较小的数据量,一个简单的 `INSERT INTO ... SELECT ...` 到远程表也可以工作。

-## 部件操作 {#manipulations-with-parts}
+## part操作 {#manipulations-with-parts}

-ClickHouse允许使用 `ALTER TABLE ... FREEZE PARTITION ...` 查询以创建表分区的本地副本。 这是利用硬链接(hardlink)到 `/var/lib/clickhouse/shadow/` 文件夹中实现的,所以它通常不会占用旧数据的额外磁盘空间。 创建的文件副本不由ClickHouse服务器处理,所以你可以把它们留在那里:你将有一个简单的备份,不需要任何额外的外部系统,但它仍然会容易出现硬件问题。 出于这个原因,最好将它们远程复制到另一个位置,然后删除本地副本。 分布式文件系统和对象存储仍然是一个不错的选择,但是具有足够大容量的正常附加文件服务器也可以工作(在这种情况下,传输将通过网络文件系统 [rsync](https://en.wikipedia.org/wiki/Rsync)).
+ClickHouse允许使用 `ALTER TABLE ... FREEZE PARTITION ...` 查询以创建表分区的本地副本。 这是利用硬链接(hardlink)到 `/var/lib/clickhouse/shadow/` 文件夹中实现的,所以它通常不会因为旧数据而占用额外的磁盘空间。 创建的文件副本不由ClickHouse服务器处理,所以你可以把它们留在那里:你将有一个简单的备份,不需要任何额外的外部系统,但它仍然容易出现硬件问题。 出于这个原因,最好将它们远程复制到另一个位置,然后删除本地副本。 分布式文件系统和对象存储仍然是一个不错的选择,但是具有足够大容量的正常附加文件服务器也可以工作(在这种情况下,传输将通过网络文件系统或者也许是 [rsync](https://en.wikipedia.org/wiki/Rsync) 来进行).

数据可以使用 `ALTER TABLE ... ATTACH PARTITION ...` 从备份中恢复。

-有关与分区操作相关的查询的详细信息,请参阅 [更改文档](../sql-reference/statements/alter.md#alter_manipulations-with-partitions).
+有关与分区操作相关的查询的详细信息,请参阅 [更改文档](../sql-reference/statements/alter.md#alter_manipulations-with-partitions).

-第三方工具可用于自动化此方法: [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup).
+第三方工具可用于自动化此方法: [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup).
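+
+下面给出 `FREEZE`/`ATTACH` 用法的一个示意性例子(表名 `t` 和分区值均为假设,需按实际分区键调整;恢复前需要先把备份文件复制回表的 `detached` 目录):
+
+``` sql
+-- 为指定分区创建本地快照(硬链接位于 /var/lib/clickhouse/shadow/)
+ALTER TABLE t FREEZE PARTITION '2021-05-01';
+-- 将备份文件复制回 detached 目录后,重新挂载该分区
+ALTER TABLE t ATTACH PARTITION '2021-05-01';
+```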
-[原始文章](https://clickhouse.tech/docs/en/operations/backup/) +[原始文章](https://clickhouse.tech/docs/en/operations/backup/) diff --git a/docs/zh/operations/system-tables/data_type_families.md b/docs/zh/operations/system-tables/data_type_families.md index 21eb4785e23..db08ff0371b 100644 --- a/docs/zh/operations/system-tables/data_type_families.md +++ b/docs/zh/operations/system-tables/data_type_families.md @@ -5,13 +5,13 @@ machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 # 系统。data_type_families {#system_tables-data_type_families} -包含有关受支持的信息 [数据类型](../../sql-reference/data-types/). +包含有关受支持的[数据类型](../../sql-reference/data-types/)的信息. -列: +列字段包括: -- `name` ([字符串](../../sql-reference/data-types/string.md)) — Data type name. -- `case_insensitive` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Property that shows whether you can use a data type name in a query in case insensitive manner or not. For example, `Date` 和 `date` 都是有效的。 -- `alias_to` ([字符串](../../sql-reference/data-types/string.md)) — Data type name for which `name` 是个化名 +- `name` ([String](../../sql-reference/data-types/string.md)) — 数据类型的名称. +- `case_insensitive` ([UInt8](../../sql-reference/data-types/int-uint.md)) — 该属性显示是否可以在查询中以不区分大小写的方式使用数据类型名称。例如 `Date` 和 `date` 都是有效的。 +- `alias_to` ([String](../../sql-reference/data-types/string.md)) — 以 `name` 为别名的数据类型的名称。 **示例** @@ -36,4 +36,4 @@ SELECT * FROM system.data_type_families WHERE alias_to = 'String' **另请参阅** -- [语法](../../sql-reference/syntax.md) — Information about supported syntax. +- [Syntax](../../sql-reference/syntax.md) — 关于所支持的语法信息. diff --git a/docs/zh/operations/system-tables/functions.md b/docs/zh/operations/system-tables/functions.md index ff716b0bc6c..8229a94cd5c 100644 --- a/docs/zh/operations/system-tables/functions.md +++ b/docs/zh/operations/system-tables/functions.md @@ -1,13 +1,30 @@ ---- -machine_translated: true -machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 ---- +# system.functions {#system-functions} -# 系统。功能 {#system-functions} - -包含有关正常函数和聚合函数的信息。 +包含有关常规函数和聚合函数的信息。 列: - `name`(`String`) – The name of the function. - `is_aggregate`(`UInt8`) — Whether the function is aggregate. + +**示例** +``` + SELECT * FROM system.functions LIMIT 10; +``` + +``` +┌─name─────────────────────┬─is_aggregate─┬─case_insensitive─┬─alias_to─┐ +│ sumburConsistentHash │ 0 │ 0 │ │ +│ yandexConsistentHash │ 0 │ 0 │ │ +│ demangle │ 0 │ 0 │ │ +│ addressToLine │ 0 │ 0 │ │ +│ JSONExtractRaw │ 0 │ 0 │ │ +│ JSONExtractKeysAndValues │ 0 │ 0 │ │ +│ JSONExtract │ 0 │ 0 │ │ +│ JSONExtractString │ 0 │ 0 │ │ +│ JSONExtractFloat │ 0 │ 0 │ │ +│ JSONExtractInt │ 0 │ 0 │ │ +└──────────────────────────┴──────────────┴──────────────────┴──────────┘ + +10 rows in set. Elapsed: 0.002 sec. 
+``` diff --git a/docs/zh/operations/system-tables/index.md b/docs/zh/operations/system-tables/index.md index 56067bc5057..0e5778e3051 100644 --- a/docs/zh/operations/system-tables/index.md +++ b/docs/zh/operations/system-tables/index.md @@ -7,33 +7,33 @@ toc_title: "\u7CFB\u7EDF\u8868" # 系统表 {#system-tables} -## 导言 {#system-tables-introduction} +## 引言 {#system-tables-introduction} -系统表提供以下信息: +系统表提供的信息如下: -- 服务器状态、进程和环境。 +- 服务器的状态、进程以及环境。 - 服务器的内部进程。 系统表: -- 坐落于 `system` 数据库。 -- 仅适用于读取数据。 -- 不能删除或更改,但可以分离。 +- 存储于 `system` 数据库。 +- 仅提供数据读取功能。 +- 不能被删除或更改,但可以对其进行分离(detach)操作。 -大多数系统表将数据存储在RAM中。 ClickHouse服务器在开始时创建此类系统表。 +大多数系统表将其数据存储在RAM中。 一个ClickHouse服务在刚启动时便会创建此类系统表。 -与其他系统表不同,系统日志表 [metric_log](../../operations/system-tables/metric_log.md#system_tables-metric_log), [query_log](../../operations/system-tables/query_log.md#system_tables-query_log), [query_thread_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log), [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log), [part_log](../../operations/system-tables/part_log.md#system.part_log), crash_log and text_log 默认采用[MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) 引擎并将其数据存储在存储文件系统中。 如果从文件系统中删除表,ClickHouse服务器会在下一次写入数据时再次创建空表。 如果系统表架构在新版本中发生更改,则ClickHouse会重命名当前表并创建一个新表。 +不同于其他系统表,系统日志表 [metric_log](../../operations/system-tables/metric_log.md#system_tables-metric_log), [query_log](../../operations/system-tables/query_log.md#system_tables-query_log), [query_thread_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log), [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log), [part_log](../../operations/system-tables/part_log.md#system.part_log), crash_log 和 text_log 默认采用[MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) 引擎并将其数据存储在文件系统中。 如果人为地从文件系统中删除表,ClickHouse服务器会在下一次进行数据写入时再次创建空表。 如果系统表结构在新版本中发生更改,那么ClickHouse会重命名当前表并创建一个新表。 -用户可以通过在`/etc/clickhouse-server/config.d/`下创建与系统表同名的配置文件, 或者在`/etc/clickhouse-server/config.xml`中设置相应配置项,来自定义系统日志表的结构。可以自定义的配置项如下: +用户可以通过在`/etc/clickhouse-server/config.d/`下创建与系统表同名的配置文件, 或者在`/etc/clickhouse-server/config.xml`中设置相应配置项,来自定义系统日志表的结构。可供自定义的配置项如下: -- `database`: 系统日志表所在的数据库。这个选项目前已经废弃。所有的系统日表都位于`system`库中。 -- `table`: 系统日志表名。 +- `database`: 系统日志表所在的数据库。这个选项目前已经不推荐使用。所有的系统日志表都位于`system`库中。 +- `table`: 接收数据写入的系统日志表。 - `partition_by`: 指定[PARTITION BY](../../engines/table-engines/mergetree-family/custom-partitioning-key.md)表达式。 - `ttl`: 指定系统日志表TTL选项。 -- `flush_interval_milliseconds`: 指定系统日志表数据落盘时间。 -- `engine`: 指定完整的表引擎定义。(以`ENGINE = `开始)。 这个选项与`partition_by`以及`ttl`冲突。如果两者一起设置,服务启动时会抛出异常并且退出。 +- `flush_interval_milliseconds`: 指定日志表数据刷新到磁盘的时间间隔。 +- `engine`: 指定完整的表引擎定义。(以`ENGINE = `开头)。 这个选项与`partition_by`以及`ttl`冲突。如果与两者一起设置,服务启动时会抛出异常并且退出。 -一个配置定义的例子如下: +配置定义的示例如下: ``` @@ -50,20 +50,20 @@ toc_title: "\u7CFB\u7EDF\u8868" ``` -默认情况下,表增长是无限的。 要控制表的大小,可以使用 TTL 删除过期日志记录的设置。 你也可以使用分区功能 `MergeTree`-发动机表。 +默认情况下,表增长是无限的。可以通过设置 TTL 来删除过期的日志记录,从而控制表的大小。 你也可以使用 `MergeTree` 引擎表的分区功能。 ## 系统指标的来源 {#system-tables-sources-of-system-metrics} 用于收集ClickHouse服务器使用的系统指标: - `CAP_NET_ADMIN` 能力。 -[procfs](https://en.wikipedia.org/wiki/Procfs) (仅在Linux中)。 +[procfs](https://en.wikipedia.org/wiki/Procfs) (仅限于Linux)。 **procfs** -如果ClickHouse服务器没有 `CAP_NET_ADMIN` 能力,它试图回落到 `ProcfsMetricsProvider`. `ProcfsMetricsProvider` 允许收集每个查询系统指标(用于CPU和I/O)。 +如果ClickHouse服务器没有 `CAP_NET_ADMIN` 能力,那么它将尝试回退到 `ProcfsMetricsProvider`. 
`ProcfsMetricsProvider` 允许收集每个查询的系统指标(包括CPU和I/O)。 -如果系统上支持并启用procfs,ClickHouse server将收集这些指标: +如果系统上支持并启用procfs,ClickHouse server将收集如下指标: - `OSCPUVirtualTimeMicroseconds` - `OSCPUWaitMicroseconds` diff --git a/docs/zh/sql-reference/data-types/special-data-types/interval.md b/docs/zh/sql-reference/data-types/special-data-types/interval.md index df2ce097df0..9df25e3f555 100644 --- a/docs/zh/sql-reference/data-types/special-data-types/interval.md +++ b/docs/zh/sql-reference/data-types/special-data-types/interval.md @@ -5,9 +5,9 @@ toc_priority: 61 toc_title: "\u95F4\u9694" --- -# 间隔 {#data-type-interval} +# Interval类型 {#data-type-interval} -表示时间和日期间隔的数据类型族。 由此产生的类型 [INTERVAL](../../../sql-reference/operators/index.md#operator-interval) 接线员 +表示时间和日期间隔的数据类型家族。它们是 [INTERVAL](../../../sql-reference/operators/index.md#operator-interval) 运算符的运算结果类型。 !!! warning "警告" `Interval` 数据类型值不能存储在表中。 @@ -15,7 +15,7 @@ toc_title: "\u95F4\u9694" 结构: - 时间间隔作为无符号整数值。 -- 间隔的类型。 +- 时间间隔的类型。 支持的时间间隔类型: - `QUARTER` - `YEAR` -对于每个间隔类型,都有一个单独的数据类型。 例如, `DAY` 间隔对应于 `IntervalDay` 数据类型: +对于每个时间间隔类型,都有一个单独的数据类型。 例如, `DAY` 间隔对应于 `IntervalDay` 数据类型: ``` sql SELECT toTypeName(INTERVAL 4 DAY) @@ -42,7 +42,7 @@ SELECT toTypeName(INTERVAL 4 DAY) ## 使用说明 {#data-type-interval-usage-remarks} -您可以使用 `Interval`-在算术运算类型值 [日期](../../../sql-reference/data-types/date.md) 和 [日期时间](../../../sql-reference/data-types/datetime.md)-类型值。 例如,您可以将4天添加到当前时间: +您可以在与 [日期](../../../sql-reference/data-types/date.md) 和 [日期时间](../../../sql-reference/data-types/datetime.md) 类型值的算术运算中使用 `Interval` 类型值。 例如,您可以将4天添加到当前时间: ``` sql SELECT now() as current_date_time, current_date_time + INTERVAL 4 DAY @@ -54,10 +54,10 @@ SELECT now() as current_date_time, current_date_time + INTERVAL 4 DAY └─────────────────────┴───────────────────────────────┘ ``` -不同类型的间隔不能合并。 你不能使用间隔,如 `4 DAY 1 HOUR`. 以小于或等于间隔的最小单位的单位指定间隔,例如,间隔 `1 day and an hour` 间隔可以表示为 `25 HOUR` 或 `90000 SECOND`. - -你不能执行算术运算 `Interval`-类型值,但你可以添加不同类型的时间间隔,因此值 `Date` 或 `DateTime` 数据类型。 例如: +不同类型的间隔不能合并。 你不能使用诸如 `4 DAY 1 HOUR` 的时间间隔. 以小于或等于时间间隔最小单位的单位来指定间隔,例如,时间间隔 `1 day and an hour` 可以表示为 `25 HOUR` 或 `90000 SECOND`. +你不能对 `Interval` 类型的值执行算术运算,但你可以向 `Date` 或 `DateTime` 数据类型的值添加不同类型的时间间隔,例如: + ``` sql SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR ``` @@ -81,5 +81,5 @@ Code: 43. DB::Exception: Received from localhost:9000. DB::Exception: Wrong argu ## 另请参阅 {#see-also} -- [INTERVAL](../../../sql-reference/operators/index.md#operator-interval) 接线员 +- [INTERVAL](../../../sql-reference/operators/index.md#operator-interval) 运算符 - [toInterval](../../../sql-reference/functions/type-conversion-functions.md#function-tointerval) 类型转换函数 diff --git a/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index f981f442fb6..cbd88de0038 100644 --- a/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -55,7 +55,7 @@ SOURCE(SOURCE_TYPE(param1 val1 ... 
paramN valN)) -- Source configuration 或 ``` sql -SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated')) +SOURCE(FILE(path './user_files/os.tsv' format 'TabSeparated')) SETTINGS(format_csv_allow_single_quotes = 0) ``` @@ -87,7 +87,7 @@ SETTINGS(format_csv_allow_single_quotes = 0) 或 ``` sql -SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated')) +SOURCE(FILE(path './user_files/os.tsv' format 'TabSeparated')) ``` 设置字段: diff --git a/docs/zh/sql-reference/dictionaries/index.md b/docs/zh/sql-reference/dictionaries/index.md index 7e8f5e83aa7..092afd5bac1 100644 --- a/docs/zh/sql-reference/dictionaries/index.md +++ b/docs/zh/sql-reference/dictionaries/index.md @@ -8,15 +8,15 @@ toc_title: "\u5BFC\u8A00" # 字典 {#dictionaries} -字典是一个映射 (`key -> attributes`)这是方便各种类型的参考清单。 +字典是一个映射 (`键 -> 属性`), 可以方便地用作各种类型的参考清单。 -ClickHouse支持使用可用于查询的字典的特殊功能。 这是更容易和更有效地使用字典与功能比 `JOIN` 与参考表。 +ClickHouse支持一些特殊函数配合字典在查询中使用。 将字典与函数结合使用比将 `JOIN` 操作与引用表结合使用更简单、更有效。 [NULL](../../sql-reference/syntax.md#null-literal) 值不能存储在字典中。 ClickHouse支持: -- [内置字典](internal-dicts.md#internal_dicts) 具有特定的 [功能集](../../sql-reference/functions/ym-dict-functions.md). -- [插件(外部)字典](external-dictionaries/external-dicts.md#dicts-external-dicts) 用一个 [功能集](../../sql-reference/functions/ext-dict-functions.md). +- [内置字典](internal-dicts.md#internal_dicts) ,这些字典具有特定的 [函数集](../../sql-reference/functions/ym-dict-functions.md). +- [插件(外部)字典](external-dictionaries/external-dicts.md#dicts-external-dicts) ,这些字典拥有一个 [函数集](../../sql-reference/functions/ext-dict-functions.md). [原始文章](https://clickhouse.tech/docs/en/query_language/dicts/) diff --git a/docs/zh/sql-reference/functions/array-functions.md b/docs/zh/sql-reference/functions/array-functions.md index ac5dae3a97e..4f6dbc0d87d 100644 --- a/docs/zh/sql-reference/functions/array-functions.md +++ b/docs/zh/sql-reference/functions/array-functions.md @@ -606,7 +606,7 @@ SELECT arrayReverseSort((x, y) -> -y, [4, 3, 5], [1, 2, 3]) AS res; 如果要获取数组中唯一项的列表,可以使用arrayReduce(‘groupUniqArray’,arr)。 -## arryjoin(arr) {#array-functions-join} +## arrayJoin(arr) {#array-functions-join} 一个特殊的功能。请参见[«ArrayJoin函数»](array-join.md#functions_arrayjoin)部分。 diff --git a/docs/zh/sql-reference/functions/other-functions.md b/docs/zh/sql-reference/functions/other-functions.md index b17a5e89332..c58c4bd1510 100644 --- a/docs/zh/sql-reference/functions/other-functions.md +++ b/docs/zh/sql-reference/functions/other-functions.md @@ -477,6 +477,103 @@ FROM 1 rows in set. Elapsed: 0.002 sec. + +## indexHint {#indexhint} +输出索引选择范围内的所有数据,同时不使用参数中的表达式进行过滤。 + +传递给函数的表达式参数将不会被计算,但ClickHouse使用参数中的表达式进行索引过滤。 + +**返回值** + +- 1。 + +**示例** + +这是一个包含[ontime](../../getting-started/example-datasets/ontime.md)测试数据集的测试表。 + +``` +SELECT count() FROM ontime + +┌─count()─┐ +│ 4276457 │ +└─────────┘ +``` + +该表使用`(FlightDate, (Year, FlightDate))`作为索引。 + +对该表进行如下的查询: + +``` +:) SELECT FlightDate AS k, count() FROM ontime GROUP BY k ORDER BY k + +SELECT + FlightDate AS k, + count() +FROM ontime +GROUP BY k +ORDER BY k ASC + +┌──────────k─┬─count()─┐ +│ 2017-01-01 │ 13970 │ +│ 2017-01-02 │ 15882 │ +........................ +│ 2017-09-28 │ 16411 │ +│ 2017-09-29 │ 16384 │ +│ 2017-09-30 │ 12520 │ +└────────────┴─────────┘ + +273 rows in set. Elapsed: 0.072 sec. Processed 4.28 million rows, 8.55 MB (59.00 million rows/s., 118.01 MB/s.) 
+``` + +在这个查询中,由于没有使用索引,所以ClickHouse将处理整个表的所有数据(`Processed 4.28 million rows`)。下面的查询则尝试使用索引进行查询: + +``` +:) SELECT FlightDate AS k, count() FROM ontime WHERE k = '2017-09-15' GROUP BY k ORDER BY k + +SELECT + FlightDate AS k, + count() +FROM ontime +WHERE k = '2017-09-15' +GROUP BY k +ORDER BY k ASC + +┌──────────k─┬─count()─┐ +│ 2017-09-15 │ 16428 │ +└────────────┴─────────┘ + +1 rows in set. Elapsed: 0.014 sec. Processed 32.74 thousand rows, 65.49 KB (2.31 million rows/s., 4.63 MB/s.) +``` + +在最后一行的显示中,通过索引ClickHouse处理的行数明显减少(`Processed 32.74 thousand rows`)。 + +现在将表达式`k = '2017-09-15'`传递给`indexHint`函数: + +``` +:) SELECT FlightDate AS k, count() FROM ontime WHERE indexHint(k = '2017-09-15') GROUP BY k ORDER BY k + +SELECT + FlightDate AS k, + count() +FROM ontime +WHERE indexHint(k = '2017-09-15') +GROUP BY k +ORDER BY k ASC + +┌──────────k─┬─count()─┐ +│ 2017-09-14 │ 7071 │ +│ 2017-09-15 │ 16428 │ +│ 2017-09-16 │ 1077 │ +│ 2017-09-30 │ 8167 │ +└────────────┴─────────┘ + +4 rows in set. Elapsed: 0.004 sec. Processed 32.74 thousand rows, 65.49 KB (8.97 million rows/s., 17.94 MB/s.) +``` + +对于这个查询,从ClickHouse的输出可以看出,ClickHouse以与上一次相同的方式应用了索引(`Processed 32.74 thousand rows`)。但是,最终返回的结果集并没有根据 `k = '2017-09-15'` 表达式进行过滤。 + +由于ClickHouse中使用稀疏索引,因此在读取范围时(本示例中为相邻日期),"额外"的数据将包含在索引结果中。使用`indexHint`函数可以查看到它们。 + ## 复制 {#replicate} 使用单个值填充一个数组。 diff --git a/docs/zh/sql-reference/statements/create.md b/docs/zh/sql-reference/statements/create.md index 639af0841dc..46e82bd1733 100644 --- a/docs/zh/sql-reference/statements/create.md +++ b/docs/zh/sql-reference/statements/create.md @@ -238,7 +238,7 @@ SELECT a, b, c FROM (SELECT ...) 当一个`SELECT`子句包含`DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`时,请注意,这些仅会在插入数据时在每个单独的数据块上执行。例如,如果你在其中包含了`GROUP BY`,则只会在查询期间进行聚合,但聚合范围仅限于单个批的写入数据。数据不会进一步被聚合。但是当你使用一些其他数据聚合引擎时这是例外的,如:`SummingMergeTree`。 -目前对物化视图执行`ALTER`是不支持的,因此这可能是不方便的。如果物化视图是使用的`TO [db.]name`的方式进行构建的,你可以使用`DETACH`语句现将视图剥离,然后使用`ALTER`运行在目标表上,然后使用`ATTACH`将之前剥离的表重新加载进来。 +目前对物化视图执行`ALTER`是不支持的,因此这可能是不方便的。如果物化视图是使用的`TO [db.]name`的方式进行构建的,你可以使用`DETACH`语句先将视图剥离,然后使用`ALTER`运行在目标表上,然后使用`ATTACH`将之前剥离的表重新加载进来。 视图看起来和普通的表相同。例如,你可以通过`SHOW TABLES`查看到它们。 diff --git a/docs/zh/sql-reference/statements/select/join.md b/docs/zh/sql-reference/statements/select/join.md index 2976484e09a..407c8ca6101 100644 --- a/docs/zh/sql-reference/statements/select/join.md +++ b/docs/zh/sql-reference/statements/select/join.md @@ -43,15 +43,15 @@ ClickHouse中提供的其他联接类型: Also the behavior of ClickHouse server for `ANY JOIN` operations depends on the [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys) setting. -### ASOF加入使用 {#asof-join-usage} +### ASOF JOIN使用 {#asof-join-usage} `ASOF JOIN` 当您需要连接没有完全匹配的记录时非常有用。 -算法需要表中的特殊列。 本专栏: +该算法需要表中的特殊列。 该列需要满足: - 必须包含有序序列。 -- 可以是以下类型之一: [Int*,UInt*](../../../sql-reference/data-types/int-uint.md), [浮动\*](../../../sql-reference/data-types/float.md), [日期](../../../sql-reference/data-types/date.md), [日期时间](../../../sql-reference/data-types/datetime.md), [十进制\*](../../../sql-reference/data-types/decimal.md). -- 不能是唯一的列 `JOIN` +- 可以是以下类型之一: [Int*,UInt*](../../../sql-reference/data-types/int-uint.md), [Float\*](../../../sql-reference/data-types/float.md), [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md), [Decimal\*](../../../sql-reference/data-types/decimal.md). +- 不能是`JOIN`子句中唯一的列 语法 `ASOF JOIN ... 
ON`: @@ -62,9 +62,9 @@ ASOF LEFT JOIN table_2 ON equi_cond AND closest_match_cond ``` -您可以使用任意数量的相等条件和恰好一个最接近的匹配条件。 例如, `SELECT count() FROM table_1 ASOF LEFT JOIN table_2 ON table_1.a == table_2.b AND table_2.t <= table_1.t`. +您可以使用任意数量的相等条件和一个且只有一个最接近的匹配条件。 例如, `SELECT count() FROM table_1 ASOF LEFT JOIN table_2 ON table_1.a == table_2.b AND table_2.t <= table_1.t`. -支持最接近匹配的条件: `>`, `>=`, `<`, `<=`. +支持最接近匹配的运算符: `>`, `>=`, `<`, `<=`. 语法 `ASOF JOIN ... USING`: @@ -75,9 +75,9 @@ ASOF JOIN table_2 USING (equi_column1, ... equi_columnN, asof_column) ``` -`ASOF JOIN` 用途 `equi_columnX` 对于加入平等和 `asof_column` 用于加入与最接近的比赛 `table_1.asof_column >= table_2.asof_column` 条件。 该 `asof_column` 列总是在最后一个 `USING` 条款 +在 `table_1.asof_column >= table_2.asof_column` 的条件下, `ASOF JOIN` 使用 `equi_columnX` 来进行相等条件匹配,使用 `asof_column` 来进行最接近匹配。 `asof_column` 列总是 `USING` 子句中的最后一个。 -例如,请考虑下表: +例如,参考下表: table_1 table_2 event | ev_time | user_id event | ev_time | user_id @@ -88,10 +88,10 @@ USING (equi_column1, ... equi_columnN, asof_column) event_1_2 | 13:00 | 42 event_2_3 | 13:00 | 42 ... ... -`ASOF JOIN` 可以从用户事件的时间戳 `table_1` 并找到一个事件 `table_2` 其中时间戳最接近事件的时间戳 `table_1` 对应于最接近的匹配条件。 如果可用,则相等的时间戳值是最接近的值。 在这里,该 `user_id` 列可用于连接相等和 `ev_time` 列可用于在最接近的匹配加入。 在我们的例子中, `event_1_1` 可以加入 `event_2_1` 和 `event_1_2` 可以加入 `event_2_3`,但是 `event_2_2` 不能加入。 +`ASOF JOIN` 会从 `table_2` 的用户事件时间戳中,找出与 `table_1` 中用户事件时间戳最接近的一个,来满足最接近匹配的条件。如果有的话,相等的时间戳值就是最接近的值。在此例中,`user_id` 列可用于条件匹配,`ev_time` 列可用于最接近匹配。因此,`event_1_1` 可以 JOIN `event_2_1`,`event_1_2` 可以 JOIN `event_2_3`,但是 `event_2_2` 不能被JOIN。 !!! note "注" - `ASOF` 加入是 **不** 支持在 [加入我们](../../../engines/table-engines/special/join.md) 表引擎。 + `ASOF JOIN`在 [JOIN](../../../engines/table-engines/special/join.md) 表引擎中 **不受** 支持。 ## 分布式联接 {#global-join} diff --git a/docs/zh/sql-reference/syntax.md b/docs/zh/sql-reference/syntax.md index 8c331db1139..c05c5a1a7bf 100644 --- a/docs/zh/sql-reference/syntax.md +++ b/docs/zh/sql-reference/syntax.md @@ -14,7 +14,7 @@ INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def') 含`INSERT INTO t VALUES` 的部分由完整SQL解析器处理,包含数据的部分 `(1, 'Hello, world'), (2, 'abc'), (3, 'def')` 交给快速流式解析器解析。通过设置参数 [input_format_values_interpret_expressions](../operations/settings/settings.md#settings-input_format_values_interpret_expressions),你也可以对数据部分开启完整SQL解析器。当 `input_format_values_interpret_expressions = 1` 时,CH优先采用快速流式解析器来解析数据。如果失败,CH再尝试用完整SQL解析器来处理,就像处理SQL [expression](#syntax-expressions) 一样。 -数据可以采用任何格式。当CH接受到请求时,服务端先在内存中计算不超过 [max_query_size](../operations/settings/settings.md#settings-max_query_size) 字节的请求数据(默认1 mb),然后剩下部分交给快速流式解析器。 +数据可以采用任何格式。当CH接收到请求时,服务端先在内存中解析不超过 [max_query_size](../operations/settings/settings.md#settings-max_query_size) 字节的请求数据(默认 1 MB),然后剩下部分交给快速流式解析器。 这将避免在处理大型的 `INSERT`语句时出现问题。 diff --git a/programs/CMakeLists.txt b/programs/CMakeLists.txt index c3600e5812a..2af0331c70b 100644 --- a/programs/CMakeLists.txt +++ b/programs/CMakeLists.txt @@ -33,13 +33,29 @@ option (ENABLE_CLICKHOUSE_OBFUSCATOR "Table data obfuscator (convert real data t ${ENABLE_CLICKHOUSE_ALL}) # https://clickhouse.tech/docs/en/operations/utilities/odbc-bridge/ -option (ENABLE_CLICKHOUSE_ODBC_BRIDGE "HTTP-server working like a proxy to ODBC driver" +if (ENABLE_ODBC) + option (ENABLE_CLICKHOUSE_ODBC_BRIDGE "HTTP-server working like a proxy to ODBC driver" + ${ENABLE_CLICKHOUSE_ALL}) +else () + option (ENABLE_CLICKHOUSE_ODBC_BRIDGE "HTTP-server working like a proxy to ODBC driver" OFF) +endif () + +option (ENABLE_CLICKHOUSE_LIBRARY_BRIDGE "HTTP-server working like a proxy to Library 
dictionary source" ${ENABLE_CLICKHOUSE_ALL}) # https://presentations.clickhouse.tech/matemarketing_2020/ option (ENABLE_CLICKHOUSE_GIT_IMPORT "A tool to analyze Git repositories" ${ENABLE_CLICKHOUSE_ALL}) + +option (ENABLE_CLICKHOUSE_KEEPER "ClickHouse alternative to ZooKeeper" ${ENABLE_CLICKHOUSE_ALL}) +if (NOT USE_NURAFT) + # RECONFIGURE_MESSAGE_LEVEL should not be used here, + # since USE_NURAFT is set to OFF for FreeBSD and Darwin. + message (STATUS "clickhouse-keeper will not be built (lack of NuRaft)") + set(ENABLE_CLICKHOUSE_KEEPER OFF) +endif() + if (CLICKHOUSE_SPLIT_BINARY) option(ENABLE_CLICKHOUSE_INSTALL "Install ClickHouse without .deb/.rpm/.tgz packages (having the binary only)" OFF) else () @@ -109,6 +125,12 @@ else() message(STATUS "ODBC bridge mode: OFF") endif() +if (ENABLE_CLICKHOUSE_LIBRARY_BRIDGE) + message(STATUS "Library bridge mode: ON") +else() + message(STATUS "Library bridge mode: OFF") +endif() + if (ENABLE_CLICKHOUSE_INSTALL) message(STATUS "ClickHouse install: ON") else() @@ -121,6 +143,12 @@ else() message(STATUS "ClickHouse git-import: OFF") endif() +if (ENABLE_CLICKHOUSE_KEEPER) + message(STATUS "ClickHouse keeper mode: ON") +else() + message(STATUS "ClickHouse keeper mode: OFF") +endif() + if(NOT (MAKE_STATIC_LIBRARIES OR SPLIT_SHARED_LIBRARIES)) set(CLICKHOUSE_ONE_SHARED ON) endif() @@ -176,6 +204,54 @@ macro(clickhouse_program_add name) clickhouse_program_add_executable(${name}) endmacro() +# Embed default config files as a resource into the binary. +# This is needed for two purposes: +# 1. Allow to run the binary without download of any other files. +# 2. Allow to implement "sudo clickhouse install" tool. +# +# Arguments: target (server, client, keeper, etc.) and list of files +# +# Also dependency on TARGET_FILE is required, look at examples in programs/server and programs/keeper +macro(clickhouse_embed_binaries) + # TODO We actually need this on Mac, FreeBSD. 
+ if (OS_LINUX) + + set(arguments_list "${ARGN}") + list(GET arguments_list 0 target) + + # for some reason cmake iterates loop including + math(EXPR arguments_count "${ARGC}-1") + + foreach(RESOURCE_POS RANGE 1 "${arguments_count}") + list(GET arguments_list "${RESOURCE_POS}" RESOURCE_FILE) + set(RESOURCE_OBJ ${RESOURCE_FILE}.o) + set(RESOURCE_OBJS ${RESOURCE_OBJS} ${RESOURCE_OBJ}) + + # https://stackoverflow.com/questions/14776463/compile-and-add-an-object-file-from-a-binary-with-cmake + # PPC64LE fails to do this with objcopy, use ld or lld instead + if (ARCH_PPC64LE) + add_custom_command(OUTPUT ${RESOURCE_OBJ} + COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && ${CMAKE_LINKER} -m elf64lppc -r -b binary -o "${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}" ${RESOURCE_FILE}) + else() + add_custom_command(OUTPUT ${RESOURCE_OBJ} + COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && ${OBJCOPY_PATH} -I binary ${OBJCOPY_ARCH_OPTIONS} ${RESOURCE_FILE} "${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}" + COMMAND ${OBJCOPY_PATH} --rename-section .data=.rodata,alloc,load,readonly,data,contents + "${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}" "${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}") + endif() + set_source_files_properties(${RESOURCE_OBJ} PROPERTIES EXTERNAL_OBJECT true GENERATED true) + endforeach() + + add_library(clickhouse_${target}_configs STATIC ${RESOURCE_OBJS}) + set_target_properties(clickhouse_${target}_configs PROPERTIES LINKER_LANGUAGE C) + + # whole-archive prevents symbols from being discarded for unknown reason + # CMake can shuffle each of target_link_libraries arguments with other + # libraries in linker command. To avoid this we hardcode whole-archive + # library into single string. + add_dependencies(clickhouse-${target}-lib clickhouse_${target}_configs) + endif () +endmacro() + add_subdirectory (server) add_subdirectory (client) @@ -190,25 +266,52 @@ add_subdirectory (install) add_subdirectory (git-import) add_subdirectory (bash-completion) +if (ENABLE_CLICKHOUSE_KEEPER) + add_subdirectory (keeper) +endif() + if (ENABLE_CLICKHOUSE_ODBC_BRIDGE) add_subdirectory (odbc-bridge) endif () +if (ENABLE_CLICKHOUSE_LIBRARY_BRIDGE) + add_subdirectory (library-bridge) +endif () + if (CLICKHOUSE_ONE_SHARED) - add_library(clickhouse-lib SHARED ${CLICKHOUSE_SERVER_SOURCES} ${CLICKHOUSE_CLIENT_SOURCES} ${CLICKHOUSE_LOCAL_SOURCES} ${CLICKHOUSE_BENCHMARK_SOURCES} ${CLICKHOUSE_COPIER_SOURCES} ${CLICKHOUSE_EXTRACT_FROM_CONFIG_SOURCES} ${CLICKHOUSE_COMPRESSOR_SOURCES} ${CLICKHOUSE_FORMAT_SOURCES} ${CLICKHOUSE_OBFUSCATOR_SOURCES} ${CLICKHOUSE_GIT_IMPORT_SOURCES} ${CLICKHOUSE_ODBC_BRIDGE_SOURCES}) - target_link_libraries(clickhouse-lib ${CLICKHOUSE_SERVER_LINK} ${CLICKHOUSE_CLIENT_LINK} ${CLICKHOUSE_LOCAL_LINK} ${CLICKHOUSE_BENCHMARK_LINK} ${CLICKHOUSE_COPIER_LINK} ${CLICKHOUSE_EXTRACT_FROM_CONFIG_LINK} ${CLICKHOUSE_COMPRESSOR_LINK} ${CLICKHOUSE_FORMAT_LINK} ${CLICKHOUSE_OBFUSCATOR_LINK} ${CLICKHOUSE_GIT_IMPORT_LINK} ${CLICKHOUSE_ODBC_BRIDGE_LINK}) - target_include_directories(clickhouse-lib ${CLICKHOUSE_SERVER_INCLUDE} ${CLICKHOUSE_CLIENT_INCLUDE} ${CLICKHOUSE_LOCAL_INCLUDE} ${CLICKHOUSE_BENCHMARK_INCLUDE} ${CLICKHOUSE_COPIER_INCLUDE} ${CLICKHOUSE_EXTRACT_FROM_CONFIG_INCLUDE} ${CLICKHOUSE_COMPRESSOR_INCLUDE} ${CLICKHOUSE_FORMAT_INCLUDE} ${CLICKHOUSE_OBFUSCATOR_INCLUDE} ${CLICKHOUSE_GIT_IMPORT_INCLUDE} ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE}) + add_library(clickhouse-lib SHARED ${CLICKHOUSE_SERVER_SOURCES} ${CLICKHOUSE_CLIENT_SOURCES} ${CLICKHOUSE_LOCAL_SOURCES} ${CLICKHOUSE_BENCHMARK_SOURCES} ${CLICKHOUSE_COPIER_SOURCES} 
${CLICKHOUSE_EXTRACT_FROM_CONFIG_SOURCES} ${CLICKHOUSE_COMPRESSOR_SOURCES} ${CLICKHOUSE_FORMAT_SOURCES} ${CLICKHOUSE_OBFUSCATOR_SOURCES} ${CLICKHOUSE_GIT_IMPORT_SOURCES} ${CLICKHOUSE_ODBC_BRIDGE_SOURCES} ${CLICKHOUSE_KEEPER_SOURCES}) + target_link_libraries(clickhouse-lib ${CLICKHOUSE_SERVER_LINK} ${CLICKHOUSE_CLIENT_LINK} ${CLICKHOUSE_LOCAL_LINK} ${CLICKHOUSE_BENCHMARK_LINK} ${CLICKHOUSE_COPIER_LINK} ${CLICKHOUSE_EXTRACT_FROM_CONFIG_LINK} ${CLICKHOUSE_COMPRESSOR_LINK} ${CLICKHOUSE_FORMAT_LINK} ${CLICKHOUSE_OBFUSCATOR_LINK} ${CLICKHOUSE_GIT_IMPORT_LINK} ${CLICKHOUSE_ODBC_BRIDGE_LINK} ${CLICKHOUSE_KEEPER_LINK}) + target_include_directories(clickhouse-lib ${CLICKHOUSE_SERVER_INCLUDE} ${CLICKHOUSE_CLIENT_INCLUDE} ${CLICKHOUSE_LOCAL_INCLUDE} ${CLICKHOUSE_BENCHMARK_INCLUDE} ${CLICKHOUSE_COPIER_INCLUDE} ${CLICKHOUSE_EXTRACT_FROM_CONFIG_INCLUDE} ${CLICKHOUSE_COMPRESSOR_INCLUDE} ${CLICKHOUSE_FORMAT_INCLUDE} ${CLICKHOUSE_OBFUSCATOR_INCLUDE} ${CLICKHOUSE_GIT_IMPORT_INCLUDE} ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE} ${CLICKHOUSE_KEEPER_INCLUDE}) set_target_properties(clickhouse-lib PROPERTIES SOVERSION ${VERSION_MAJOR}.${VERSION_MINOR} VERSION ${VERSION_SO} OUTPUT_NAME clickhouse DEBUG_POSTFIX "") install (TARGETS clickhouse-lib LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT clickhouse) endif() if (CLICKHOUSE_SPLIT_BINARY) - set (CLICKHOUSE_ALL_TARGETS clickhouse-server clickhouse-client clickhouse-local clickhouse-benchmark clickhouse-extract-from-config clickhouse-compressor clickhouse-format clickhouse-obfuscator clickhouse-git-import clickhouse-copier) + set (CLICKHOUSE_ALL_TARGETS + clickhouse-server + clickhouse-client + clickhouse-local + clickhouse-benchmark + clickhouse-extract-from-config + clickhouse-compressor + clickhouse-format + clickhouse-obfuscator + clickhouse-git-import + clickhouse-copier + ) if (ENABLE_CLICKHOUSE_ODBC_BRIDGE) list (APPEND CLICKHOUSE_ALL_TARGETS clickhouse-odbc-bridge) endif () + if (ENABLE_CLICKHOUSE_LIBRARY_BRIDGE) + list (APPEND CLICKHOUSE_ALL_TARGETS clickhouse-library-bridge) + endif () + + if (ENABLE_CLICKHOUSE_KEEPER) + list (APPEND CLICKHOUSE_ALL_TARGETS clickhouse-keeper) + endif () + set_target_properties(${CLICKHOUSE_ALL_TARGETS} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..) 
add_custom_target (clickhouse-bundle ALL DEPENDS ${CLICKHOUSE_ALL_TARGETS}) @@ -256,6 +359,9 @@ else () if (ENABLE_CLICKHOUSE_GIT_IMPORT) clickhouse_target_link_split_lib(clickhouse git-import) endif () + if (ENABLE_CLICKHOUSE_KEEPER) + clickhouse_target_link_split_lib(clickhouse keeper) + endif() if (ENABLE_CLICKHOUSE_INSTALL) clickhouse_target_link_split_lib(clickhouse install) endif () @@ -263,54 +369,59 @@ else () set (CLICKHOUSE_BUNDLE) if (ENABLE_CLICKHOUSE_SERVER) add_custom_target (clickhouse-server ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-server DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-server DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-server" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-server) endif () if (ENABLE_CLICKHOUSE_CLIENT) add_custom_target (clickhouse-client ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-client DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-client DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-client" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-client) endif () if (ENABLE_CLICKHOUSE_LOCAL) add_custom_target (clickhouse-local ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-local DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-local DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-local" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-local) endif () if (ENABLE_CLICKHOUSE_BENCHMARK) add_custom_target (clickhouse-benchmark ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-benchmark DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-benchmark DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-benchmark" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-benchmark) endif () if (ENABLE_CLICKHOUSE_COPIER) add_custom_target (clickhouse-copier ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-copier DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-copier DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-copier" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-copier) endif () if (ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG) add_custom_target (clickhouse-extract-from-config ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-extract-from-config DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-extract-from-config DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-extract-from-config" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-extract-from-config) endif () if (ENABLE_CLICKHOUSE_COMPRESSOR) add_custom_target (clickhouse-compressor ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-compressor DEPENDS clickhouse) - install (FILES 
${CMAKE_CURRENT_BINARY_DIR}/clickhouse-compressor DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-compressor" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-compressor) endif () if (ENABLE_CLICKHOUSE_FORMAT) add_custom_target (clickhouse-format ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-format DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-format DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-format" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-format) endif () if (ENABLE_CLICKHOUSE_OBFUSCATOR) add_custom_target (clickhouse-obfuscator ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-obfuscator DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-obfuscator DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-obfuscator" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-obfuscator) endif () if (ENABLE_CLICKHOUSE_GIT_IMPORT) add_custom_target (clickhouse-git-import ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-git-import DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-git-import DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-git-import" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-git-import) endif () + if (ENABLE_CLICKHOUSE_KEEPER) + add_custom_target (clickhouse-keeper ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-keeper DEPENDS clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-keeper" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + list(APPEND CLICKHOUSE_BUNDLE clickhouse-keeper) + endif () install (TARGETS clickhouse RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) diff --git a/programs/bash-completion/completions/clickhouse b/programs/bash-completion/completions/clickhouse index c4b77cf3f7a..fc55398dcf1 100644 --- a/programs/bash-completion/completions/clickhouse +++ b/programs/bash-completion/completions/clickhouse @@ -23,19 +23,9 @@ function _complete_for_clickhouse_entrypoint_bin() fi util="${words[1]}" - case "$prev" in - -C|--config-file|--config) - return - ;; - # Argh... This looks like a bash bug... - # Redirections are passed to the completion function - # although it is managed by the shell directly... 
- '<'|'>'|'>>'|[12]'>'|[12]'>>') - return - ;; - esac - - COMPREPLY=( $(compgen -W "$(_clickhouse_get_options "$cmd" "$util")" -- "$cur") ) + if _complete_for_clickhouse_generic_bin_impl "$prev"; then + COMPREPLY=( $(compgen -W "$(_clickhouse_get_options "$cmd" "$util")" -- "$cur") ) + fi return 0 } diff --git a/programs/bash-completion/completions/clickhouse-bootstrap b/programs/bash-completion/completions/clickhouse-bootstrap index dc8dcd5ad8d..7109148a192 100644 --- a/programs/bash-completion/completions/clickhouse-bootstrap +++ b/programs/bash-completion/completions/clickhouse-bootstrap @@ -15,6 +15,76 @@ shopt -s extglob export _CLICKHOUSE_COMPLETION_LOADED=1 +CLICKHOUSE_QueryProcessingStage=( + complete + fetch_columns + with_mergeable_state + with_mergeable_state_after_aggregation +) + +CLICKHOUSE_Format=( + CapnProto + PostgreSQLWire + MySQLWire + JSONStringsEachRowWithProgress + JSONEachRowWithProgress + JSONCompact + JSON + CSV + Vertical + ODBCDriver2 + PrettySpaceNoEscapes + Pretty + JSONCompactStrings + PrettyNoEscapes + ArrowStream + TabSeparatedWithNames + Parquet + Arrow + PrettyCompact + AvroConfluent + ORC + PrettyCompactNoEscapes + RawBLOB + Template + MsgPack + JSONCompactEachRow + CustomSeparated + TemplateIgnoreSpaces + Markdown + XML + ProtobufSingle + JSONCompactStringsEachRowWithNamesAndTypes + TSKV + TabSeparated + JSONStringEachRow + JSONStringsEachRow + TSVRaw + Values + TabSeparatedWithNamesAndTypes + PrettyCompactMonoBlock + TSVWithNamesAndTypes + Avro + RowBinaryWithNamesAndTypes + LineAsString + Native + JSONCompactEachRowWithNamesAndTypes + PrettySpace + Regexp + TSV + JSONEachRow + CustomSeparatedIgnoreSpaces + CSVWithNames + JSONStrings + Null + TabSeparatedRaw + TSVWithNames + Protobuf + RowBinary + JSONAsString + JSONCompactStringsEachRow +) + function _clickhouse_bin_exist() { [ -x "$1" ] || command -v "$1" >& /dev/null; } @@ -30,6 +100,37 @@ function _clickhouse_get_options() "$@" --help 2>&1 | awk -F '[ ,=<>]' '{ for (i=1; i <= NF; ++i) { if (substr($i, 0, 1) == "-" && length($i) > 1) print $i; } }' | sort -u } +function _complete_for_clickhouse_generic_bin_impl() +{ + local prev=$1 && shift + + case "$prev" in + -C|--config-file|--config) + return 1 + ;; + --stage) + COMPREPLY=( $(compgen -W "${CLICKHOUSE_QueryProcessingStage[*]}" -- "$cur") ) + return 1 + ;; + --format|--input-format|--output-format) + COMPREPLY=( $(compgen -W "${CLICKHOUSE_Format[*]}" -- "$cur") ) + return 1 + ;; + --host) + COMPREPLY=( $(compgen -A hostname -- "$cur") ) + return 1 + ;; + # Argh... This looks like a bash bug... + # Redirections are passed to the completion function + # although it is managed by the shell directly... + '<'|'>'|'>>'|[12]'>'|[12]'>>') + return 1 + ;; + esac + + return 0 +} + function _complete_for_clickhouse_generic_bin() { local cur prev @@ -39,19 +140,9 @@ function _complete_for_clickhouse_generic_bin() COMPREPLY=() _get_comp_words_by_ref cur prev - case "$prev" in - -C|--config-file|--config) - return - ;; - # Argh... This looks like a bash bug... - # Redirections are passed to the completion function - # although it is managed by the shell directly... 
- '<'|'>'|'>>'|[12]'>'|[12]'>>') - return - ;; - esac - - COMPREPLY=( $(compgen -W "$(_clickhouse_get_options "$cmd")" -- "$cur") ) + if _complete_for_clickhouse_generic_bin_impl "$prev"; then + COMPREPLY=( $(compgen -W "$(_clickhouse_get_options "$cmd")" -- "$cur") ) + fi return 0 } diff --git a/programs/benchmark/Benchmark.cpp b/programs/benchmark/Benchmark.cpp index a0e2ea155ba..2e48c5d20c5 100644 --- a/programs/benchmark/Benchmark.cpp +++ b/programs/benchmark/Benchmark.cpp @@ -8,7 +8,6 @@ #include #include #include -#include #include #include #include @@ -36,7 +35,9 @@ #include #include #include +#include +namespace fs = std::filesystem; /** A tool for evaluating ClickHouse performance. * The tool emulates a case with fixed amount of simultaneously executing queries. @@ -95,8 +96,8 @@ public: comparison_info_total.emplace_back(std::make_shared()); } - global_context.makeGlobalContext(); - global_context.setSettings(settings); + global_context->makeGlobalContext(); + global_context->setSettings(settings); std::cerr << std::fixed << std::setprecision(3); @@ -119,8 +120,8 @@ public: int main(const std::vector &) override { - if (!json_path.empty() && Poco::File(json_path).exists()) /// Clear file with previous results - Poco::File(json_path).remove(); + if (!json_path.empty() && fs::exists(json_path)) /// Clear file with previous results + fs::remove(json_path); readQueries(); runBenchmark(); @@ -159,7 +160,7 @@ private: bool print_stacktrace; const Settings & settings; SharedContextHolder shared_context; - Context global_context; + ContextMutablePtr global_context; QueryProcessingStage::Enum query_processing_stage; /// Don't execute new queries after timelimit or SIGINT or exception diff --git a/programs/client/CMakeLists.txt b/programs/client/CMakeLists.txt index 72b5caf9784..084e1b45911 100644 --- a/programs/client/CMakeLists.txt +++ b/programs/client/CMakeLists.txt @@ -21,4 +21,4 @@ list(APPEND CLICKHOUSE_CLIENT_LINK PRIVATE readpassphrase) clickhouse_program_add(client) -install (FILES clickhouse-client.xml DESTINATION ${CLICKHOUSE_ETC_DIR}/clickhouse-client COMPONENT clickhouse-client RENAME config.xml) +install (FILES clickhouse-client.xml DESTINATION "${CLICKHOUSE_ETC_DIR}/clickhouse-client" COMPONENT clickhouse-client RENAME config.xml) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index ca9976ac4a8..f268d2b5cdc 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1,7 +1,7 @@ -#include "TestHint.h" #include "ConnectionParameters.h" #include "QueryFuzzer.h" #include "Suggest.h" +#include "TestHint.h" #if USE_REPLXX # include @@ -21,11 +21,10 @@ #include #include #include -#include +#include #include #include #include -#include #include #include #include @@ -86,6 +85,9 @@ #include #include #include +#include +#include +#include #if !defined(ARCADIA_BUILD) # include @@ -95,13 +97,10 @@ #pragma GCC optimize("-fno-var-tracking-assignments") #endif -/// http://en.wikipedia.org/wiki/ANSI_escape_code -#define CLEAR_TO_END_OF_LINE "\033[K" - +namespace fs = std::filesystem; namespace DB { - namespace ErrorCodes { extern const int NETWORK_ERROR; @@ -119,8 +118,7 @@ namespace ErrorCodes static bool queryHasWithClause(const IAST * ast) { - if (const auto * select = dynamic_cast(ast); - select && select->with()) + if (const auto * select = dynamic_cast(ast); select && select->with()) { return true; } @@ -152,28 +150,22 @@ public: private: using StringSet = std::unordered_set; - StringSet exit_strings - { - "exit", "quit", "logout", - "учше", 
"йгше", "дщпщге", - "exit;", "quit;", "logout;", - "учшеж", "йгшеж", "дщпщгеж", - "q", "й", "\\q", "\\Q", "\\й", "\\Й", ":q", "Жй" - }; - bool is_interactive = true; /// Use either interactive line editing interface or batch mode. - bool need_render_progress = true; /// Render query execution progress. - bool echo_queries = false; /// Print queries before execution in batch mode. - bool ignore_error = false; /// In case of errors, don't print error message, continue to next query. Only applicable for non-interactive mode. - bool print_time_to_stderr = false; /// Output execution time to stderr in batch mode. - bool stdin_is_a_tty = false; /// stdin is a terminal. - bool stdout_is_a_tty = false; /// stdout is a terminal. + StringSet exit_strings{"exit", "quit", "logout", "учше", "йгше", "дщпщге", "exit;", "quit;", "logout;", "учшеж", + "йгшеж", "дщпщгеж", "q", "й", "\\q", "\\Q", "\\й", "\\Й", ":q", "Жй"}; + bool is_interactive = true; /// Use either interactive line editing interface or batch mode. + bool echo_queries = false; /// Print queries before execution in batch mode. + bool ignore_error + = false; /// In case of errors, don't print error message, continue to next query. Only applicable for non-interactive mode. + bool print_time_to_stderr = false; /// Output execution time to stderr in batch mode. + bool stdin_is_a_tty = false; /// stdin is a terminal. + bool stdout_is_a_tty = false; /// stdout is a terminal. /// If not empty, queries will be read from these files std::vector queries_files; /// If not empty, run queries from these files before processing every file from 'queries_files'. std::vector interleave_queries_files; - std::unique_ptr connection; /// Connection to DB. + std::unique_ptr connection; /// Connection to DB. String full_query; /// Current query as it was given to the client. // Current query as it will be sent to the server. It may differ from the @@ -181,23 +173,23 @@ private: // is stripped and sent separately. String query_to_send; - String format; /// Query results output format. - bool is_default_format = true; /// false, if format is set in the config or command line. - size_t format_max_block_size = 0; /// Max block size for console output. - String insert_format; /// Format of INSERT data that is read from stdin in batch mode. + String format; /// Query results output format. + bool is_default_format = true; /// false, if format is set in the config or command line. + size_t format_max_block_size = 0; /// Max block size for console output. + String insert_format; /// Format of INSERT data that is read from stdin in batch mode. size_t insert_format_max_block_size = 0; /// Max block size when reading INSERT data. size_t max_client_network_bandwidth = 0; /// The maximum speed of data exchange over the network for the client in bytes per second. bool has_vertical_output_suffix = false; /// Is \G present at the end of the query string? SharedContextHolder shared_context = Context::createShared(); - Context context = Context::createGlobal(shared_context.get()); + ContextMutablePtr context = Context::createGlobal(shared_context.get()); /// Buffer that reads from stdin in batch mode. - ReadBufferFromFileDescriptor std_in {STDIN_FILENO}; + ReadBufferFromFileDescriptor std_in{STDIN_FILENO}; /// Console output. - WriteBufferFromFileDescriptor std_out {STDOUT_FILENO}; + WriteBufferFromFileDescriptor std_out{STDOUT_FILENO}; std::unique_ptr pager_cmd; /// The user can specify to redirect query output to a file. 
@@ -242,10 +234,9 @@ private: /// The server periodically sends information about how much data was read since last time. Progress progress; - bool show_progress_bar = false; - size_t written_progress_chars = 0; - bool written_first_block = false; + /// Progress bar + ProgressBar progress_bar; /// External tables info. std::list external_tables; @@ -274,20 +265,20 @@ private: configReadClient(config(), home_path); - context.setApplicationType(Context::ApplicationType::CLIENT); - context.setQueryParameters(query_parameters); + context->setApplicationType(Context::ApplicationType::CLIENT); + context->setQueryParameters(query_parameters); /// settings and limits could be specified in config file, but passed settings has higher priority - for (const auto & setting : context.getSettingsRef().allUnchanged()) + for (const auto & setting : context->getSettingsRef().allUnchanged()) { const auto & name = setting.getName(); if (config().has(name)) - context.setSetting(name, config().getString(name)); + context->setSetting(name, config().getString(name)); } /// Set path for format schema files if (config().has("format_schema_path")) - context.setFormatSchemaPath(Poco::Path(config().getString("format_schema_path")).toString()); + context->setFormatSchemaPath(fs::weakly_canonical(config().getString("format_schema_path"))); /// Initialize query_id_formats if any if (config().has("query_id_formats")) @@ -322,16 +313,13 @@ private: if (std::string::npos != embedded_stack_trace_pos && !print_stack_trace) text.resize(embedded_stack_trace_pos); - std::cerr << "Code: " << e.code() << ". " << text << std::endl << std::endl; + std::cerr << "Code: " << e.code() << ". " << text << std::endl << std::endl; /// Don't print the stack trace on the client if it was logged on the server. /// Also don't print the stack trace in case of network errors. - if (print_stack_trace - && e.code() != ErrorCodes::NETWORK_ERROR - && std::string::npos == embedded_stack_trace_pos) + if (print_stack_trace && e.code() != ErrorCodes::NETWORK_ERROR && std::string::npos == embedded_stack_trace_pos) { - std::cerr << "Stack trace:" << std::endl - << e.getStackTraceString(); + std::cerr << "Stack trace:" << std::endl << e.getStackTraceString(); } /// If exception code isn't zero, we should return non-zero return code anyway. @@ -354,8 +342,7 @@ private: return false; LocalDate now(current_time); - return (now.month() == 12 && now.day() >= 20) - || (now.month() == 1 && now.day() <= 5); + return (now.month() == 12 && now.day() >= 20) || (now.month() == 1 && now.day() <= 5); } static bool isChineseNewYearMode(const String & local_tz) @@ -406,9 +393,9 @@ private: if (chineseNewYearTimeZoneIndicators + M == std::find_if(chineseNewYearTimeZoneIndicators, chineseNewYearTimeZoneIndicators + M, [&local_tz](const char * tz) - { - return tz == local_tz; - })) + { + return tz == local_tz; + })) return false; /// It's bad to be intrusive. 
@@ -432,52 +419,51 @@ private: { using namespace replxx; - static const std::unordered_map token_to_color = - { - { TokenType::Whitespace, Replxx::Color::DEFAULT }, - { TokenType::Comment, Replxx::Color::GRAY }, - { TokenType::BareWord, Replxx::Color::DEFAULT }, - { TokenType::Number, Replxx::Color::GREEN }, - { TokenType::StringLiteral, Replxx::Color::CYAN }, - { TokenType::QuotedIdentifier, Replxx::Color::MAGENTA }, - { TokenType::OpeningRoundBracket, Replxx::Color::BROWN }, - { TokenType::ClosingRoundBracket, Replxx::Color::BROWN }, - { TokenType::OpeningSquareBracket, Replxx::Color::BROWN }, - { TokenType::ClosingSquareBracket, Replxx::Color::BROWN }, - { TokenType::OpeningCurlyBrace, Replxx::Color::INTENSE }, - { TokenType::ClosingCurlyBrace, Replxx::Color::INTENSE }, + static const std::unordered_map token_to_color + = {{TokenType::Whitespace, Replxx::Color::DEFAULT}, + {TokenType::Comment, Replxx::Color::GRAY}, + {TokenType::BareWord, Replxx::Color::DEFAULT}, + {TokenType::Number, Replxx::Color::GREEN}, + {TokenType::StringLiteral, Replxx::Color::CYAN}, + {TokenType::QuotedIdentifier, Replxx::Color::MAGENTA}, + {TokenType::OpeningRoundBracket, Replxx::Color::BROWN}, + {TokenType::ClosingRoundBracket, Replxx::Color::BROWN}, + {TokenType::OpeningSquareBracket, Replxx::Color::BROWN}, + {TokenType::ClosingSquareBracket, Replxx::Color::BROWN}, + {TokenType::OpeningCurlyBrace, Replxx::Color::INTENSE}, + {TokenType::ClosingCurlyBrace, Replxx::Color::INTENSE}, - { TokenType::Comma, Replxx::Color::INTENSE }, - { TokenType::Semicolon, Replxx::Color::INTENSE }, - { TokenType::Dot, Replxx::Color::INTENSE }, - { TokenType::Asterisk, Replxx::Color::INTENSE }, - { TokenType::Plus, Replxx::Color::INTENSE }, - { TokenType::Minus, Replxx::Color::INTENSE }, - { TokenType::Slash, Replxx::Color::INTENSE }, - { TokenType::Percent, Replxx::Color::INTENSE }, - { TokenType::Arrow, Replxx::Color::INTENSE }, - { TokenType::QuestionMark, Replxx::Color::INTENSE }, - { TokenType::Colon, Replxx::Color::INTENSE }, - { TokenType::Equals, Replxx::Color::INTENSE }, - { TokenType::NotEquals, Replxx::Color::INTENSE }, - { TokenType::Less, Replxx::Color::INTENSE }, - { TokenType::Greater, Replxx::Color::INTENSE }, - { TokenType::LessOrEquals, Replxx::Color::INTENSE }, - { TokenType::GreaterOrEquals, Replxx::Color::INTENSE }, - { TokenType::Concatenation, Replxx::Color::INTENSE }, - { TokenType::At, Replxx::Color::INTENSE }, - { TokenType::DoubleAt, Replxx::Color::MAGENTA }, + {TokenType::Comma, Replxx::Color::INTENSE}, + {TokenType::Semicolon, Replxx::Color::INTENSE}, + {TokenType::Dot, Replxx::Color::INTENSE}, + {TokenType::Asterisk, Replxx::Color::INTENSE}, + {TokenType::Plus, Replxx::Color::INTENSE}, + {TokenType::Minus, Replxx::Color::INTENSE}, + {TokenType::Slash, Replxx::Color::INTENSE}, + {TokenType::Percent, Replxx::Color::INTENSE}, + {TokenType::Arrow, Replxx::Color::INTENSE}, + {TokenType::QuestionMark, Replxx::Color::INTENSE}, + {TokenType::Colon, Replxx::Color::INTENSE}, + {TokenType::Equals, Replxx::Color::INTENSE}, + {TokenType::NotEquals, Replxx::Color::INTENSE}, + {TokenType::Less, Replxx::Color::INTENSE}, + {TokenType::Greater, Replxx::Color::INTENSE}, + {TokenType::LessOrEquals, Replxx::Color::INTENSE}, + {TokenType::GreaterOrEquals, Replxx::Color::INTENSE}, + {TokenType::Concatenation, Replxx::Color::INTENSE}, + {TokenType::At, Replxx::Color::INTENSE}, + {TokenType::DoubleAt, Replxx::Color::MAGENTA}, - { TokenType::EndOfStream, Replxx::Color::DEFAULT }, + {TokenType::EndOfStream, 
Replxx::Color::DEFAULT}, - { TokenType::Error, Replxx::Color::RED }, - { TokenType::ErrorMultilineCommentIsNotClosed, Replxx::Color::RED }, - { TokenType::ErrorSingleQuoteIsNotClosed, Replxx::Color::RED }, - { TokenType::ErrorDoubleQuoteIsNotClosed, Replxx::Color::RED }, - { TokenType::ErrorSinglePipeMark, Replxx::Color::RED }, - { TokenType::ErrorWrongNumber, Replxx::Color::RED }, - { TokenType::ErrorMaxQuerySizeExceeded, Replxx::Color::RED } - }; + {TokenType::Error, Replxx::Color::RED}, + {TokenType::ErrorMultilineCommentIsNotClosed, Replxx::Color::RED}, + {TokenType::ErrorSingleQuoteIsNotClosed, Replxx::Color::RED}, + {TokenType::ErrorDoubleQuoteIsNotClosed, Replxx::Color::RED}, + {TokenType::ErrorSinglePipeMark, Replxx::Color::RED}, + {TokenType::ErrorWrongNumber, Replxx::Color::RED}, + { TokenType::ErrorMaxQuerySizeExceeded, + Replxx::Color::RED }}; const Replxx::Color unknown_token_color = Replxx::Color::RED; @@ -527,7 +513,10 @@ private: std::cerr << std::fixed << std::setprecision(3); if (is_interactive) + { + clearTerminal(); showClientVersion(); + } is_default_format = !config().has("vertical") && !config().has("format"); if (config().has("vertical")) @@ -535,24 +524,24 @@ private: else format = config().getString("format", is_interactive ? "PrettyCompact" : "TabSeparated"); - format_max_block_size = config().getInt("format_max_block_size", context.getSettingsRef().max_block_size); + format_max_block_size = config().getInt("format_max_block_size", context->getSettingsRef().max_block_size); insert_format = "Values"; /// Setting value from cmd arg overrides one from config - if (context.getSettingsRef().max_insert_block_size.changed) - insert_format_max_block_size = context.getSettingsRef().max_insert_block_size; + if (context->getSettingsRef().max_insert_block_size.changed) + insert_format_max_block_size = context->getSettingsRef().max_insert_block_size; else - insert_format_max_block_size = config().getInt("insert_format_max_block_size", context.getSettingsRef().max_insert_block_size); + insert_format_max_block_size = config().getInt("insert_format_max_block_size", context->getSettingsRef().max_insert_block_size); if (!is_interactive) { - need_render_progress = config().getBool("progress", false); + progress_bar.need_render_progress = config().getBool("progress", false); echo_queries = config().getBool("echo", false); ignore_error = config().getBool("ignore-error", false); } - ClientInfo & client_info = context.getClientInfo(); + ClientInfo & client_info = context->getClientInfo(); client_info.setInitialQuery(); client_info.quota_key = config().getString("quota_key", ""); @@ -560,7 +549,7 @@ private: /// Initialize DateLUT here to avoid counting time spent here as query execution time. const auto local_tz = DateLUT::instance().getTimeZone(); - if (!context.getSettingsRef().use_client_time_zone) + if (!context->getSettingsRef().use_client_time_zone) { const auto & time_zone = connection->getServerTimezone(connection_parameters.timeouts); if (!time_zone.empty()) @@ -572,16 +561,16 @@ private: catch (...) { std::cerr << "Warning: could not switch to server time zone: " << time_zone - << ", reason: " << getCurrentExceptionMessage(/* with_stacktrace = */ false) << std::endl - << "Proceeding with local time zone." - << std::endl << std::endl; + << ", reason: " << getCurrentExceptionMessage(/* with_stacktrace = */ false) << std::endl + << "Proceeding with local time zone." << std::endl + << std::endl; } } else { std::cerr << "Warning: could not determine server time zone. 
" - << "Proceeding with local time zone." - << std::endl << std::endl; + << "Proceeding with local time zone." << std::endl + << std::endl; } } @@ -609,8 +598,7 @@ private: } /// Prompt may contain the following substitutions in a form of {name}. - std::map prompt_substitutions - { + std::map prompt_substitutions{ {"host", connection_parameters.host}, {"port", toString(connection_parameters.port)}, {"user", connection_parameters.user}, @@ -618,7 +606,7 @@ private: }; /// Quite suboptimal. - for (const auto & [key, value]: prompt_substitutions) + for (const auto & [key, value] : prompt_substitutions) boost::replace_all(prompt_by_server_display_name, "{" + key + "}", value); if (is_interactive) @@ -647,8 +635,8 @@ private: history_file = home_path + "/.clickhouse-client-history"; } - if (!history_file.empty() && !Poco::File(history_file).exists()) - Poco::File(history_file).createFile(); + if (!history_file.empty() && !fs::exists(history_file)) + FS::createFile(history_file); LineReader::Patterns query_extenders = {"\\"}; LineReader::Patterns query_delimiters = {";", "\\G"}; @@ -658,13 +646,7 @@ private: if (config().getBool("highlight")) highlight_callback = highlight; - ReplxxLineReader lr( - *suggest, - history_file, - config().has("multiline"), - query_extenders, - query_delimiters, - highlight_callback); + ReplxxLineReader lr(*suggest, history_file, config().has("multiline"), query_extenders, query_delimiters, highlight_callback); #elif defined(USE_READLINE) && USE_READLINE ReadlineLineReader lr(*suggest, history_file, config().has("multiline"), query_extenders, query_delimiters); @@ -701,8 +683,8 @@ private: // We don't need to handle the test hints in the interactive // mode. std::cerr << std::endl - << "Exception on client:" << std::endl - << "Code: " << e.code() << ". " << e.displayText() << std::endl; + << "Exception on client:" << std::endl + << "Code: " << e.code() << ". " << e.displayText() << std::endl; if (config().getBool("stacktrace", false)) std::cerr << "Stack trace:" << std::endl << e.getStackTraceString() << std::endl; @@ -720,8 +702,7 @@ private: /// So we reconnect and allow to enter the next query. connect(); } - } - while (true); + } while (true); if (isNewYearMode()) std::cout << "Happy new year." << std::endl; @@ -735,14 +716,13 @@ private: { auto query_id = config().getString("query_id", ""); if (!query_id.empty()) - context.setCurrentQueryId(query_id); + context->setCurrentQueryId(query_id); nonInteractive(); // If exception code isn't zero, we should return non-zero return // code anyway. - const auto * exception = server_exception - ? server_exception.get() : client_exception.get(); + const auto * exception = server_exception ? server_exception.get() : client_exception.get(); if (exception) { return exception->code() != 0 ? exception->code() : -1; @@ -765,10 +745,10 @@ private: if (is_interactive) std::cout << "Connecting to " - << (!connection_parameters.default_database.empty() ? "database " + connection_parameters.default_database + " at " : "") - << connection_parameters.host << ":" << connection_parameters.port - << (!connection_parameters.user.empty() ? " as user " + connection_parameters.user : "") - << "." << std::endl; + << (!connection_parameters.default_database.empty() ? "database " + connection_parameters.default_database + " at " + : "") + << connection_parameters.host << ":" << connection_parameters.port + << (!connection_parameters.user.empty() ? " as user " + connection_parameters.user : "") << "." 
<< std::endl; connection = std::make_unique( connection_parameters.host, @@ -793,8 +773,8 @@ private: connection->setThrottler(throttler); } - connection->getServerVersion(connection_parameters.timeouts, - server_name, server_version_major, server_version_minor, server_version_patch, server_revision); + connection->getServerVersion( + connection_parameters.timeouts, server_name, server_version_major, server_version_minor, server_version_patch, server_revision); server_version = toString(server_version_major) + "." + toString(server_version_minor) + "." + toString(server_version_patch); @@ -805,10 +785,9 @@ private: if (is_interactive) { - std::cout << "Connected to " << server_name - << " server version " << server_version - << " revision " << server_revision - << "." << std::endl << std::endl; + std::cout << "Connected to " << server_name << " server version " << server_version << " revision " << server_revision << "." + << std::endl + << std::endl; auto client_version_tuple = std::make_tuple(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH); auto server_version_tuple = std::make_tuple(server_version_major, server_version_minor, server_version_patch); @@ -816,14 +795,14 @@ private: if (client_version_tuple < server_version_tuple) { std::cout << "ClickHouse client version is older than ClickHouse server. " - << "It may lack support for new features." - << std::endl << std::endl; + << "It may lack support for new features." << std::endl + << std::endl; } else if (client_version_tuple > server_version_tuple) { std::cout << "ClickHouse server version is older than ClickHouse client. " - << "It may indicate that the server is out of date and can be upgraded." - << std::endl << std::endl; + << "It may indicate that the server is out of date and can be upgraded." << std::endl + << std::endl; } } } @@ -880,7 +859,7 @@ private: bool processQueryText(const String & text) { - if (exit_strings.end() != exit_strings.find(trim(text, [](char c){ return isWhitespaceASCII(c) || c == ';'; }))) + if (exit_strings.end() != exit_strings.find(trim(text, [](char c) { return isWhitespaceASCII(c) || c == ';'; }))) return false; if (!config().has("multiquery")) @@ -901,15 +880,13 @@ private: // Consumes trailing semicolons and tries to consume the same-line trailing // comment. - static void adjustQueryEnd(const char *& this_query_end, - const char * all_queries_end, int max_parser_depth) + static void adjustQueryEnd(const char *& this_query_end, const char * all_queries_end, int max_parser_depth) { // We have to skip the trailing semicolon that might be left // after VALUES parsing or just after a normal semicolon-terminated query. Tokens after_query_tokens(this_query_end, all_queries_end); IParser::Pos after_query_iterator(after_query_tokens, max_parser_depth); - while (after_query_iterator.isValid() - && after_query_iterator->type == TokenType::Semicolon) + while (after_query_iterator.isValid() && after_query_iterator->type == TokenType::Semicolon) { this_query_end = after_query_iterator->end; ++after_query_iterator; @@ -926,8 +903,7 @@ private: // newline is some trailing whitespace or comment, and we should // add it to our query. There are also several special cases // that are described below. 
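As an aside for readers following this hunk: the logic being reflowed here — consume trailing semicolons after a parsed query, then decide whether a same-line `--` comment belongs to that query — can be sketched standalone. The sketch below uses plain string scanning instead of the `Tokens`/`IParser::Pos` lexer the real code relies on, so treat it as an illustration of the control flow only, under simplified assumptions.

```cpp
#include <cstddef>
#include <iostream>
#include <string_view>

// Toy version of the "adjust query end" idea: after a parsed query, consume
// trailing semicolons and spaces, then decide whether a same-line "--" comment
// should be attached to the current query (e.g. a test hint). Plain string
// scanning stands in for ClickHouse's Tokens/IParser::Pos machinery, so this
// mirrors only the control flow, not the real implementation.
static std::size_t adjustQueryEnd(std::string_view all_queries, std::size_t this_query_end)
{
    while (this_query_end < all_queries.size()
        && (all_queries[this_query_end] == ';' || all_queries[this_query_end] == ' '))
        ++this_query_end;

    // If a comment starts before the end of the current line, include the whole
    // line: the trailing comment belongs to the query we just parsed.
    const std::size_t newline = all_queries.find('\n', this_query_end);
    const std::size_t comment = all_queries.find("--", this_query_end);
    if (comment != std::string_view::npos && (newline == std::string_view::npos || comment < newline))
        this_query_end = (newline == std::string_view::npos) ? all_queries.size() : newline;

    return this_query_end;
}

int main()
{
    const std::string_view text = "SELECT 1; -- { serverError 42 }\nSELECT 2;";
    const std::size_t end = adjustQueryEnd(text, 8); // position just past "SELECT 1"
    std::cout << text.substr(0, end) << '\n';        // the query together with its trailing hint
}
```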
- const auto * newline = find_first_symbols<'\n'>(this_query_end, - all_queries_end); + const auto * newline = find_first_symbols<'\n'>(this_query_end, all_queries_end); const char * next_query_begin = after_query_iterator->begin; // We include the entire line if the next query starts after @@ -953,14 +929,12 @@ private: { std::string text = server_exception->displayText(); auto embedded_stack_trace_pos = text.find("Stack trace"); - if (std::string::npos != embedded_stack_trace_pos - && !config().getBool("stacktrace", false)) + if (std::string::npos != embedded_stack_trace_pos && !config().getBool("stacktrace", false)) { text.resize(embedded_stack_trace_pos); } - std::cerr << "Received exception from server (version " - << server_version << "):" << std::endl << "Code: " - << server_exception->code() << ". " << text << std::endl; + std::cerr << "Received exception from server (version " << server_version << "):" << std::endl + << "Code: " << server_exception->code() << ". " << text << std::endl; if (is_interactive) { std::cerr << std::endl; @@ -969,9 +943,7 @@ private: if (client_exception) { - fmt::print(stderr, - "Error on processing query '{}':\n{}\n", - full_query, client_exception->message()); + fmt::print(stderr, "Error on processing query '{}':\n{}\n", full_query, client_exception->message()); if (is_interactive) { fmt::print(stderr, "\n"); @@ -1015,8 +987,7 @@ private: // Remove leading empty newlines and other whitespace, because they // are annoying to filter in query log. This is mostly relevant for // the tests. - while (this_query_begin < all_queries_end - && isWhitespaceASCII(*this_query_begin)) + while (this_query_begin < all_queries_end && isWhitespaceASCII(*this_query_begin)) { ++this_query_begin; } @@ -1034,8 +1005,7 @@ private: // and it makes more sense to treat them as such. { Tokens tokens(this_query_begin, all_queries_end); - IParser::Pos token_iterator(tokens, - context.getSettingsRef().max_parser_depth); + IParser::Pos token_iterator(tokens, context->getSettingsRef().max_parser_depth); if (!token_iterator.isValid()) { break; @@ -1053,17 +1023,14 @@ private: // Try to find test hint for syntax error. We don't know where // the query ends because we failed to parse it, so we consume // the entire line. 
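The `TestHint` used throughout this hunk extracts expectations such as `-- { serverError 123 }` from the query text. A regex-based toy reader, assuming that comment format (which is taken from the examples in this diff; the real parser is token-based and also understands `clientError`), might look like the following sketch:

```cpp
#include <iostream>
#include <optional>
#include <regex>
#include <string>

// Toy reader for hints of the form "-- { serverError 123 }". This is an
// assumption-laden sketch of the concept, not ClickHouse's actual TestHint.
static std::optional<int> parseServerErrorHint(const std::string & query_text)
{
    static const std::regex hint_re(R"(--\s*\{\s*serverError\s+(\d+)\s*\})");
    std::smatch match;
    if (std::regex_search(query_text, match, hint_re))
        return std::stoi(match[1].str());
    return std::nullopt;
}

int main()
{
    if (auto code = parseServerErrorHint("SELECT throwIf(1) -- { serverError 395 }"))
        std::cout << "test expects server error " << *code << '\n';
}
```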
- this_query_end = find_first_symbols<'\n'>(this_query_end, - all_queries_end); + this_query_end = find_first_symbols<'\n'>(this_query_end, all_queries_end); - TestHint hint(test_mode, - String(this_query_begin, this_query_end - this_query_begin)); + TestHint hint(test_mode, String(this_query_begin, this_query_end - this_query_begin)); if (hint.serverError()) { // Syntax errors are considered as client errors - e.addMessage("\nExpected server error '{}'.", - hint.serverError()); + e.addMessage("\nExpected server error '{}'.", hint.serverError()); throw; } @@ -1084,7 +1051,7 @@ private: if (ignore_error) { Tokens tokens(this_query_begin, all_queries_end); - IParser::Pos token_iterator(tokens, context.getSettingsRef().max_parser_depth); + IParser::Pos token_iterator(tokens, context->getSettingsRef().max_parser_depth); while (token_iterator->type != TokenType::Semicolon && token_iterator.isValid()) ++token_iterator; this_query_begin = token_iterator->end; @@ -1109,18 +1076,13 @@ private: auto * insert_ast = parsed_query->as(); if (insert_ast && insert_ast->data) { - this_query_end = find_first_symbols<'\n'>(insert_ast->data, - all_queries_end); + this_query_end = find_first_symbols<'\n'>(insert_ast->data, all_queries_end); insert_ast->end = this_query_end; - query_to_send = all_queries_text.substr( - this_query_begin - all_queries_text.data(), - insert_ast->data - this_query_begin); + query_to_send = all_queries_text.substr(this_query_begin - all_queries_text.data(), insert_ast->data - this_query_begin); } else { - query_to_send = all_queries_text.substr( - this_query_begin - all_queries_text.data(), - this_query_end - this_query_begin); + query_to_send = all_queries_text.substr(this_query_begin - all_queries_text.data(), this_query_end - this_query_begin); } // Try to include the trailing comment with test hints. It is just @@ -1129,14 +1091,11 @@ private: // after we have processed the query. But even this guess is // beneficial so that we see proper trailing comments in "echo" and // server log. - adjustQueryEnd(this_query_end, all_queries_end, - context.getSettingsRef().max_parser_depth); + adjustQueryEnd(this_query_end, all_queries_end, context->getSettingsRef().max_parser_depth); // full_query is the query + inline INSERT data + trailing comments // (the latter is our best guess for now). - full_query = all_queries_text.substr( - this_query_begin - all_queries_text.data(), - this_query_end - this_query_begin); + full_query = all_queries_text.substr(this_query_begin - all_queries_text.data(), this_query_end - this_query_begin); if (query_fuzzer_runs) { @@ -1155,8 +1114,7 @@ private: { // Surprisingly, this is a client error. A server error would // have been reported w/o throwing (see onReceiveSeverException()). - client_exception = std::make_unique( - getCurrentExceptionMessage(true), getCurrentExceptionCode()); + client_exception = std::make_unique(getCurrentExceptionMessage(true), getCurrentExceptionCode()); have_error = true; } @@ -1169,8 +1127,7 @@ private: if (insert_ast && insert_ast->data) { this_query_end = insert_ast->end; - adjustQueryEnd(this_query_end, all_queries_end, - context.getSettingsRef().max_parser_depth); + adjustQueryEnd(this_query_end, all_queries_end, context->getSettingsRef().max_parser_depth); } // Now we know for sure where the query ends. @@ -1178,8 +1135,7 @@ private: // comments, // e.g. insert into t format CSV 'a' -- { serverError 123 }. // Use the updated query boundaries we just calculated. 
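For orientation, here is how the query-text/inline-data split handled above behaves, as a self-contained sketch: the statement text up to the data pointer is sent as the query, and the inline data runs to the end of the line, where the next query of a multiquery script may begin. Locating the data by searching for ` VALUES ` is a simplification for this demo; the real client takes the position from the parsed AST (`insert_ast->data`).

```cpp
#include <cstddef>
#include <iostream>
#include <string_view>

int main()
{
    const std::string_view all = "INSERT INTO t VALUES (1), (2)\nSELECT count() FROM t;";

    constexpr std::string_view kw = " VALUES ";
    const std::size_t data_begin = all.find(kw) + kw.size(); // inline data starts here
    std::size_t data_end = all.find('\n', data_begin);       // and runs to the end of the line
    if (data_end == std::string_view::npos)
        data_end = all.size();

    const std::string_view query_to_send = all.substr(0, data_begin);
    const std::string_view inline_data = all.substr(data_begin, data_end - data_begin);

    std::cout << "query: " << query_to_send << "\n"
              << "data:  " << inline_data << '\n';
}
```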
- TestHint test_hint(test_mode, std::string(this_query_begin, - this_query_end - this_query_begin)); + TestHint test_hint(test_mode, std::string(this_query_begin, this_query_end - this_query_begin)); // Check whether the error (or its absence) matches the test hints // (or their absence). @@ -1191,16 +1147,13 @@ private: if (!server_exception) { error_matches_hint = false; - fmt::print(stderr, - "Expected server error code '{}' but got no server error.\n", - test_hint.serverError()); + fmt::print(stderr, "Expected server error code '{}' but got no server error.\n", test_hint.serverError()); } else if (server_exception->code() != test_hint.serverError()) { error_matches_hint = false; - std::cerr << "Expected server error code: " << - test_hint.serverError() << " but got: " << - server_exception->code() << "." << std::endl; + std::cerr << "Expected server error code: " << test_hint.serverError() << " but got: " << server_exception->code() + << "." << std::endl; } } @@ -1209,17 +1162,13 @@ private: if (!client_exception) { error_matches_hint = false; - fmt::print(stderr, - "Expected client error code '{}' but got no client error.\n", - test_hint.clientError()); + fmt::print(stderr, "Expected client error code '{}' but got no client error.\n", test_hint.clientError()); } else if (client_exception->code() != test_hint.clientError()) { error_matches_hint = false; - fmt::print(stderr, - "Expected client error code '{}' but got '{}'.\n", - test_hint.clientError(), - client_exception->code()); + fmt::print( + stderr, "Expected client error code '{}' but got '{}'.\n", test_hint.clientError(), client_exception->code()); } } @@ -1235,17 +1184,13 @@ private: { if (test_hint.clientError()) { - fmt::print(stderr, - "The query succeeded but the client error '{}' was expected.\n", - test_hint.clientError()); + fmt::print(stderr, "The query succeeded but the client error '{}' was expected.\n", test_hint.clientError()); error_matches_hint = false; } if (test_hint.serverError()) { - fmt::print(stderr, - "The query succeeded but the server error '{}' was expected.\n", - test_hint.serverError()); + fmt::print(stderr, "The query succeeded but the server error '{}' was expected.\n", test_hint.serverError()); error_matches_hint = false; } } @@ -1287,7 +1232,7 @@ private: // Prints changed settings to stderr. Useful for debugging fuzzing failures. 
void printChangedSettings() const { - const auto & changes = context.getSettingsRef().changes(); + const auto & changes = context->getSettingsRef().changes(); if (!changes.empty()) { fmt::print(stderr, "Changed settings: "); @@ -1297,8 +1242,7 @@ private: { fmt::print(stderr, ", "); } - fmt::print(stderr, "{} = '{}'", changes[i].name, - toString(changes[i].value)); + fmt::print(stderr, "{} = '{}'", changes[i].name, toString(changes[i].value)); } fmt::print(stderr, "\n"); } @@ -1346,8 +1290,7 @@ private: ASTPtr fuzz_base = orig_ast; for (size_t fuzz_step = 0; fuzz_step < this_query_runs; ++fuzz_step) { - fmt::print(stderr, "Fuzzing step {} out of {}\n", - fuzz_step, this_query_runs); + fmt::print(stderr, "Fuzzing step {} out of {}\n", fuzz_step, this_query_runs); ASTPtr ast_to_process; try @@ -1377,10 +1320,12 @@ private: { printChangedSettings(); - fmt::print(stderr, + fmt::print( + stderr, "Base before fuzz: {}\n" "Base after fuzz: {}\n", - base_before_fuzz, base_after_fuzz); + base_before_fuzz, + base_after_fuzz); fmt::print(stderr, "Dump before fuzz:\n{}\n", dump_before_fuzz.str()); fmt::print(stderr, "Dump of cloned AST:\n{}\n", dump_of_cloned_ast.str()); fmt::print(stderr, "Dump after fuzz:\n"); @@ -1389,7 +1334,9 @@ private: fuzz_base->dumpTree(cerr_buf); cerr_buf.next(); - fmt::print(stderr, "IAST::clone() is broken for some AST node. This is a bug. The original AST ('dump before fuzz') and its cloned copy ('dump of cloned AST') refer to the same nodes, which must never happen. This means that their parent node doesn't implement clone() correctly."); + fmt::print( + stderr, + "IAST::clone() is broken for some AST node. This is a bug. The original AST ('dump before fuzz') and its cloned copy ('dump of cloned AST') refer to the same nodes, which must never happen. This means that their parent node doesn't implement clone() correctly."); exit(1); } @@ -1413,18 +1360,35 @@ private: // uniformity. // Surprisingly, this is a client exception, because we get the // server exception w/o throwing (see onReceiveException()). - client_exception = std::make_unique( - getCurrentExceptionMessage(true), getCurrentExceptionCode()); + client_exception = std::make_unique(getCurrentExceptionMessage(true), getCurrentExceptionCode()); have_error = true; } if (have_error) { - const auto * exception = server_exception - ? server_exception.get() : client_exception.get(); - fmt::print(stderr, "Error on processing query '{}': {}\n", - ast_to_process->formatForErrorMessage(), - exception->message()); + const auto * exception = server_exception ? server_exception.get() : client_exception.get(); + fmt::print(stderr, "Error on processing query '{}': {}\n", ast_to_process->formatForErrorMessage(), exception->message()); + + // Try to reconnect after errors, for two reasons: + // 1. We might not have realized that the server died, e.g. if + // it sent us a trace and closed connection properly. + // 2. The connection might have gotten into a wrong state and + // the next query will get false positive about + // "Unknown packet from server". + try + { + connection->forceConnected(connection_parameters.timeouts); + } + catch (...) + { + // Just report it, we'll terminate below. + fmt::print(stderr, + "Error while reconnecting to the server: Code: {}: {}\n", + getCurrentExceptionCode(), + getCurrentExceptionMessage(true)); + + assert(!connection->isConnected()); + } } if (!connection->isConnected()) @@ -1458,18 +1422,26 @@ private: // when `lambda()` function gets substituted into a wrong place. 
// To avoid dealing with these cases, run the check only for the - // queries we were able to successfully execute. - // The final caveat is that sometimes WITH queries are not executed, + // Another caveat is that sometimes WITH queries are not executed, // if they are not referenced by the main SELECT, so they can still // have the aforementioned problems. Disable this check for such // queries, for lack of a better solution. - if (!have_error && queryHasWithClause(parsed_query.get())) + // There is also a problem that the fuzzer substitutes positive Int64 + // literals or Decimal literals, which are then parsed back as + // UInt64, and suddenly duplicate alias substitution starts or stops + // working (ASTWithAlias::formatImpl) or something like that. + // So we compare not even the first and second formatting of the + // query, but second and third. + // If you have to add any more workarounds to this check, just remove + // it altogether, it's not so useful. + if (!have_error && !queryHasWithClause(parsed_query.get())) { - ASTPtr parsed_formatted_query; + ASTPtr ast_2; try { const auto * tmp_pos = query_to_send.c_str(); - parsed_formatted_query = parseQuery(tmp_pos, - tmp_pos + query_to_send.size(), + + ast_2 = parseQuery(tmp_pos, tmp_pos + query_to_send.size(), false /* allow_multi_statements */); } catch (Exception & e) { @@ -1480,25 +1452,30 @@ private: } } - if (parsed_formatted_query) + if (ast_2) { - const auto formatted_twice - = parsed_formatted_query->formatForErrorMessage(); - - if (formatted_twice != query_to_send) + const auto text_2 = ast_2->formatForErrorMessage(); + const auto * tmp_pos = text_2.c_str(); + const auto ast_3 = parseQuery(tmp_pos, tmp_pos + text_2.size(), + false /* allow_multi_statements */); + const auto text_3 = ast_3->formatForErrorMessage(); + if (text_3 != text_2) { fmt::print(stderr, "The query formatting is broken.\n"); printChangedSettings(); - fmt::print(stderr, "Got the following (different) text after formatting the fuzzed query and parsing it back:\n'{}'\n, expected:\n'{}'\n", - formatted_twice, query_to_send); + fmt::print(stderr, + "Got the following (different) text after formatting the fuzzed query and parsing it back:\n'{}'\n, expected:\n'{}'\n", + text_3, text_2); + fmt::print(stderr, "In more detail:\n"); - fmt::print(stderr, "AST-1:\n'{}'\n", parsed_query->dumpTree()); + fmt::print(stderr, "AST-1 (generated by fuzzer):\n'{}'\n", parsed_query->dumpTree()); fmt::print(stderr, "Text-1 (AST-1 formatted):\n'{}'\n", query_to_send); - fmt::print(stderr, "AST-2 (Text-1 parsed):\n'{}'\n", parsed_formatted_query->dumpTree()); - fmt::print(stderr, "Text-2 (AST-2 formatted):\n'{}'\n", formatted_twice); - fmt::print(stderr, "Text-1 must be equal to Text-2, but it is not.\n"); + fmt::print(stderr, "AST-2 (Text-1 parsed):\n'{}'\n", ast_2->dumpTree()); + fmt::print(stderr, "Text-2 (AST-2 formatted):\n'{}'\n", text_2); + fmt::print(stderr, "AST-3 (Text-2 parsed):\n'{}'\n", ast_3->dumpTree()); + fmt::print(stderr, "Text-3 (AST-3 formatted):\n'{}'\n", text_3); + fmt::print(stderr, "Text-3 must be equal to Text-2, but it is not.\n"); exit(1); } @@ -1587,11 +1564,11 @@ private: if (is_interactive) { // Generate a new query_id - context.setCurrentQueryId(""); + context->setCurrentQueryId(""); for (const auto & query_id_format : query_id_formats) { writeString(query_id_format.first, std_out); - writeString(fmt::format(query_id_format.second, fmt::arg("query_id", context.getCurrentQueryId())), std_out); + writeString(fmt::format(query_id_format.second,
fmt::arg("query_id", context->getCurrentQueryId())), std_out); writeChar('\n', std_out); std_out.next(); } @@ -1600,19 +1577,22 @@ private: watch.restart(); processed_rows = 0; progress.reset(); - show_progress_bar = false; - written_progress_chars = 0; - written_first_block = false; + progress_bar.show_progress_bar = false; + progress_bar.written_progress_chars = 0; + progress_bar.written_first_block = false; { /// Temporarily apply query settings to context. std::optional old_settings; - SCOPE_EXIT({ if (old_settings) context.setSettings(*old_settings); }); + SCOPE_EXIT_SAFE({ + if (old_settings) + context->setSettings(*old_settings); + }); auto apply_query_settings = [&](const IAST & settings_ast) { if (!old_settings) - old_settings.emplace(context.getSettingsRef()); - context.applySettingsChanges(settings_ast.as()->changes); + old_settings.emplace(context->getSettingsRef()); + context->applySettingsChanges(settings_ast.as()->changes); }; const auto * insert = parsed_query->as(); if (insert && insert->settings_ast) @@ -1650,7 +1630,7 @@ private: if (change.name == "profile") current_profile = change.value.safeGet(); else - context.applySettingChange(change); + context->applySettingChange(change); } } @@ -1666,8 +1646,7 @@ private: if (is_interactive) { - std::cout << std::endl - << processed_rows << " rows in set. Elapsed: " << watch.elapsedSeconds() << " sec. "; + std::cout << std::endl << processed_rows << " rows in set. Elapsed: " << watch.elapsedSeconds() << " sec. "; if (progress.read_rows >= 1000) writeFinalProgress(); @@ -1722,10 +1701,10 @@ private: connection->sendQuery( connection_parameters.timeouts, query_to_send, - context.getCurrentQueryId(), + context->getCurrentQueryId(), query_processing_stage, - &context.getSettingsRef(), - &context.getClientInfo(), + &context->getSettingsRef(), + &context->getClientInfo(), true); sendExternalTables(); @@ -1737,12 +1716,10 @@ private: { /// Retry when the server said "Client should retry" and no rows /// has been received yet. 
- if (processed_rows == 0 - && e.code() == ErrorCodes::DEADLOCK_AVOIDED - && --retries_left) + if (processed_rows == 0 && e.code() == ErrorCodes::DEADLOCK_AVOIDED && --retries_left) { std::cerr << "Got a transient error from the server, will" - << " retry (" << retries_left << " retries left)"; + << " retry (" << retries_left << " retries left)"; } else { @@ -1763,10 +1740,10 @@ private: connection->sendQuery( connection_parameters.timeouts, query_to_send, - context.getCurrentQueryId(), + context->getCurrentQueryId(), query_processing_stage, - &context.getSettingsRef(), - &context.getClientInfo(), + &context->getSettingsRef(), + &context->getClientInfo(), true); sendExternalTables(); @@ -1784,12 +1761,12 @@ private: } - ASTPtr parseQuery(const char * & pos, const char * end, bool allow_multi_statements) + ASTPtr parseQuery(const char *& pos, const char * end, bool allow_multi_statements) { ParserQuery parser(end); ASTPtr res; - const auto & settings = context.getSettingsRef(); + const auto & settings = context->getSettingsRef(); size_t max_length = 0; if (!allow_multi_statements) max_length = settings.max_query_size; @@ -1877,8 +1854,7 @@ private: current_format = insert->format; } - BlockInputStreamPtr block_input = context.getInputFormat( - current_format, buf, sample, insert_format_max_block_size); + BlockInputStreamPtr block_input = context->getInputFormat(current_format, buf, sample, insert_format_max_block_size); if (columns_description.hasDefaults()) block_input = std::make_shared(block_input, columns_description, context); @@ -1971,13 +1947,12 @@ private: /// to avoid losing sync. if (!cancelled) { - auto cancel_query = [&] - { + auto cancel_query = [&] { connection->sendCancel(); cancelled = true; if (is_interactive) { - clearProgress(); + progress_bar.clearProgress(); std::cout << "Cancelling query." << std::endl; } @@ -2066,8 +2041,8 @@ private: return false; default: - throw Exception(ErrorCodes::UNKNOWN_PACKET_FROM_SERVER, "Unknown packet {} from server {}", - packet.type, connection->getDescription()); + throw Exception( + ErrorCodes::UNKNOWN_PACKET_FROM_SERVER, "Unknown packet {} from server {}", packet.type, connection->getDescription()); } } @@ -2098,8 +2073,10 @@ private: return receiveSampleBlock(out, columns_description); default: - throw NetException("Unexpected packet from server (expected Data, Exception or Log, got " - + String(Protocol::Server::toString(packet.type)) + ")", ErrorCodes::UNEXPECTED_PACKET_FROM_SERVER); + throw NetException( + "Unexpected packet from server (expected Data, Exception or Log, got " + + String(Protocol::Server::toString(packet.type)) + ")", + ErrorCodes::UNEXPECTED_PACKET_FROM_SERVER); } } } @@ -2127,8 +2104,10 @@ private: break; default: - throw NetException("Unexpected packet from server (expected Exception, EndOfStream or Log, got " - + String(Protocol::Server::toString(packet.type)) + ")", ErrorCodes::UNEXPECTED_PACKET_FROM_SERVER); + throw NetException( + "Unexpected packet from server (expected Exception, EndOfStream or Log, got " + + String(Protocol::Server::toString(packet.type)) + ")", + ErrorCodes::UNEXPECTED_PACKET_FROM_SERVER); } } } @@ -2200,10 +2179,10 @@ private: current_format = "Vertical"; /// It is not clear how to write progress with parallel formatting. It may increase code complexity significantly. 
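The progress calls being moved into `progress_bar` in the surrounding hunks (`clearProgress`, `writeProgress`) rely on a classic terminal trick: the bar is drawn on stderr with a carriage return and erased with an ANSI "erase to end of line" sequence, so it never contaminates the data written to stdout. A minimal sketch of that mechanism, assuming `CLEAR_TO_END_OF_LINE` is the standard `ESC[K` sequence (as the deleted code further below suggests):

```cpp
#include <chrono>
#include <cstddef>
#include <iostream>
#include <thread>

// The bar goes to stderr; data rows go to stdout. "\r" returns the cursor to
// the start of the line, and ESC[K removes the old bar, so redrawing never
// leaves residue on screen.
#define CLEAR_TO_END_OF_LINE "\033[K"

static void writeProgress(std::size_t rows)
{
    std::cerr << "\rProgress: " << rows << " rows" << CLEAR_TO_END_OF_LINE << std::flush;
}

static void clearProgress()
{
    std::cerr << "\r" CLEAR_TO_END_OF_LINE << std::flush;
}

int main()
{
    for (std::size_t rows = 0; rows <= 30000; rows += 10000)
    {
        writeProgress(rows);
        std::this_thread::sleep_for(std::chrono::milliseconds(100)); // pretend to receive a block
    }
    clearProgress(); // erase the bar before printing the real result
    std::cout << "30000 rows in set." << std::endl;
}
```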
- if (!need_render_progress) - block_out_stream = context.getOutputStreamParallelIfPossible(current_format, *out_buf, block); + if (!progress_bar.need_render_progress) + block_out_stream = context->getOutputStreamParallelIfPossible(current_format, *out_buf, block); else - block_out_stream = context.getOutputStream(current_format, *out_buf, block); + block_out_stream = context->getOutputStream(current_format, *out_buf, block); block_out_stream->writePrefix(); } @@ -2231,8 +2210,8 @@ private: } else { - out_logs_buf = std::make_unique( - server_logs_file, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_APPEND | O_CREAT); + out_logs_buf + = std::make_unique(server_logs_file, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_APPEND | O_CREAT); wb = out_logs_buf.get(); } } @@ -2249,38 +2228,35 @@ private: return; processed_rows += block.rows(); + + /// Even if all blocks are empty, we still need to initialize the output stream to write empty resultset. initBlockOutputStream(block); /// The header block containing zero rows was used to initialize /// block_out_stream, do not output it. /// Also do not output too much data if we're fuzzing. - if (block.rows() != 0 - && (query_fuzzer_runs == 0 || processed_rows < 100)) - { - block_out_stream->write(block); - written_first_block = true; - } + if (block.rows() == 0 || (query_fuzzer_runs != 0 && processed_rows >= 100)) + return; - bool clear_progress = false; - if (need_render_progress) - clear_progress = std_out.offset() > 0; + if (progress_bar.need_render_progress) + progress_bar.clearProgress(); - if (clear_progress) - clearProgress(); + block_out_stream->write(block); + progress_bar.written_first_block = true; /// Received data block is immediately displayed to the user. block_out_stream->flush(); /// Restore progress bar after data block. - if (clear_progress) - writeProgress(); + if (progress_bar.need_render_progress) + progress_bar.writeProgress(progress, watch.elapsed()); } void onLogData(Block & block) { initLogsOutputStream(); - clearProgress(); + progress_bar.clearProgress(); logs_out_stream->write(block); logs_out_stream->flush(); } @@ -2301,128 +2277,26 @@ private: void onProgress(const Progress & value) { - if (!progress.incrementPiecewiseAtomically(value)) + if (!progress_bar.updateProgress(progress, value)) { // Just a keep-alive update. return; } if (block_out_stream) block_out_stream->onProgress(value); - - writeProgress(); - } - - - void clearProgress() - { - if (written_progress_chars) - { - written_progress_chars = 0; - std::cerr << "\r" CLEAR_TO_END_OF_LINE; - } - } - - - void writeProgress() - { - if (!need_render_progress) - return; - - /// Output all progress bar commands to stderr at once to avoid flicker. - WriteBufferFromFileDescriptor message(STDERR_FILENO, 1024); - - static size_t increment = 0; - static const char * indicators[8] = - { - "\033[1;30m→\033[0m", - "\033[1;31m↘\033[0m", - "\033[1;32m↓\033[0m", - "\033[1;33m↙\033[0m", - "\033[1;34m←\033[0m", - "\033[1;35m↖\033[0m", - "\033[1;36m↑\033[0m", - "\033[1m↗\033[0m", - }; - - const char * indicator = indicators[increment % 8]; - - size_t terminal_width = getTerminalWidth(); - - if (!written_progress_chars) - { - /// If the current line is not empty, the progress must be output on the next line. 
- /// The trick is found here: https://www.vidarholen.net/contents/blog/?p=878 - message << std::string(terminal_width, ' '); - } - message << '\r'; - - size_t prefix_size = message.count(); - - message << indicator << " Progress: "; - - message - << formatReadableQuantity(progress.read_rows) << " rows, " - << formatReadableSizeWithDecimalSuffix(progress.read_bytes); - - size_t elapsed_ns = watch.elapsed(); - if (elapsed_ns) - message << " (" - << formatReadableQuantity(progress.read_rows * 1000000000.0 / elapsed_ns) << " rows/s., " - << formatReadableSizeWithDecimalSuffix(progress.read_bytes * 1000000000.0 / elapsed_ns) << "/s.) "; - else - message << ". "; - - written_progress_chars = message.count() - prefix_size - (strlen(indicator) - 2); /// Don't count invisible output (escape sequences). - - /// If the approximate number of rows to process is known, we can display a progress bar and percentage. - if (progress.total_rows_to_read > 0) - { - size_t total_rows_corrected = std::max(progress.read_rows, progress.total_rows_to_read); - - /// To avoid flicker, display progress bar only if .5 seconds have passed since query execution start - /// and the query is less than halfway done. - - if (elapsed_ns > 500000000) - { - /// Trigger to start displaying progress bar. If query is mostly done, don't display it. - if (progress.read_rows * 2 < total_rows_corrected) - show_progress_bar = true; - - if (show_progress_bar) - { - ssize_t width_of_progress_bar = static_cast(terminal_width) - written_progress_chars - strlen(" 99%"); - if (width_of_progress_bar > 0) - { - std::string bar = UnicodeBar::render(UnicodeBar::getWidth(progress.read_rows, 0, total_rows_corrected, width_of_progress_bar)); - message << "\033[0;32m" << bar << "\033[0m"; - if (width_of_progress_bar > static_cast(bar.size() / UNICODE_BAR_CHAR_SIZE)) - message << std::string(width_of_progress_bar - bar.size() / UNICODE_BAR_CHAR_SIZE, ' '); - } - } - } - - /// Underestimate percentage a bit to avoid displaying 100%. - message << ' ' << (99 * progress.read_rows / total_rows_corrected) << '%'; - } - - message << CLEAR_TO_END_OF_LINE; - ++increment; - - message.next(); + progress_bar.writeProgress(progress, watch.elapsed()); } void writeFinalProgress() { - std::cout << "Processed " - << formatReadableQuantity(progress.read_rows) << " rows, " - << formatReadableSizeWithDecimalSuffix(progress.read_bytes); + std::cout << "Processed " << formatReadableQuantity(progress.read_rows) << " rows, " + << formatReadableSizeWithDecimalSuffix(progress.read_bytes); size_t elapsed_ns = watch.elapsed(); if (elapsed_ns) - std::cout << " (" - << formatReadableQuantity(progress.read_rows * 1000000000.0 / elapsed_ns) << " rows/s., " - << formatReadableSizeWithDecimalSuffix(progress.read_bytes * 1000000000.0 / elapsed_ns) << "/s.) "; + std::cout << " (" << formatReadableQuantity(progress.read_rows * 1000000000.0 / elapsed_ns) << " rows/s., " + << formatReadableSizeWithDecimalSuffix(progress.read_bytes * 1000000000.0 / elapsed_ns) << "/s.)"; else std::cout << ". "; } @@ -2445,7 +2319,7 @@ private: void onEndOfStream() { - clearProgress(); + progress_bar.clearProgress(); if (block_out_stream) block_out_stream->writeSuffix(); @@ -2455,9 +2329,9 @@ private: resetOutput(); - if (is_interactive && !written_first_block) + if (is_interactive && !progress_bar.written_first_block) { - clearProgress(); + progress_bar.clearProgress(); std::cout << "Ok." 
<< std::endl; } } @@ -2467,6 +2341,16 @@ private: std::cout << DBMS_NAME << " client version " << VERSION_STRING << VERSION_OFFICIAL << "." << std::endl; } + static void clearTerminal() + { + /// Clear from cursor until end of screen. + /// It is needed if garbage is left in terminal. + /// Show cursor. It can be left hidden by invocation of previous programs. + /// A test for this feature: perl -e 'print "x"x100000'; echo -ne '\033[0;0H\033[?25l'; clickhouse-client + std::cout << "\033[0J" + "\033[?25h"; + } + public: void init(int argc, char ** argv) { @@ -2482,7 +2366,7 @@ public: */ using Arguments = std::vector; - Arguments common_arguments{""}; /// 0th argument is ignored. + Arguments common_arguments{""}; /// 0th argument is ignored. std::vector external_tables_arguments; bool in_external_group = false; @@ -2496,22 +2380,19 @@ public: external_tables_arguments.emplace_back(Arguments{""}); } /// Options with value after equal sign. - else if (in_external_group - && (0 == strncmp(arg, "--file=", strlen("--file=")) - || 0 == strncmp(arg, "--name=", strlen("--name=")) - || 0 == strncmp(arg, "--format=", strlen("--format=")) - || 0 == strncmp(arg, "--structure=", strlen("--structure=")) - || 0 == strncmp(arg, "--types=", strlen("--types=")))) + else if ( + in_external_group + && (0 == strncmp(arg, "--file=", strlen("--file=")) || 0 == strncmp(arg, "--name=", strlen("--name=")) + || 0 == strncmp(arg, "--format=", strlen("--format=")) || 0 == strncmp(arg, "--structure=", strlen("--structure=")) + || 0 == strncmp(arg, "--types=", strlen("--types=")))) { external_tables_arguments.back().emplace_back(arg); } /// Options with value after whitespace. - else if (in_external_group - && (0 == strcmp(arg, "--file") - || 0 == strcmp(arg, "--name") - || 0 == strcmp(arg, "--format") - || 0 == strcmp(arg, "--structure") - || 0 == strcmp(arg, "--types"))) + else if ( + in_external_group + && (0 == strcmp(arg, "--file") || 0 == strcmp(arg, "--name") || 0 == strcmp(arg, "--format") + || 0 == strcmp(arg, "--structure") || 0 == strcmp(arg, "--types"))) { if (arg_num + 1 < argc) { @@ -2622,13 +2503,10 @@ public: /// Commandline options related to external tables. po::options_description external_description = createOptionsDescription("External tables options", terminal_width); - external_description.add_options() - ("file", po::value(), "data file or - for stdin") - ("name", po::value()->default_value("_data"), "name of the table") - ("format", po::value()->default_value("TabSeparated"), "data format") - ("structure", po::value(), "structure") - ("types", po::value(), "types") - ; + external_description.add_options()("file", po::value(), "data file or - for stdin")( + "name", + po::value()->default_value("_data"), + "name of the table")("format", po::value()->default_value("TabSeparated"), "data format")("structure", po::value(), "structure")("types", po::value(), "types"); /// Parse main commandline options. po::parsed_options parsed = po::command_line_parser(common_arguments).options(main_description).run(); @@ -2656,7 +2534,7 @@ public: /// Output of help message. if (options.count("help") - || (options.count("host") && options["host"].as() == "elp")) /// If user writes -help instead of --help. + || (options.count("host") && options["host"].as() == "elp")) /// If user writes -help instead of --help. 
{ std::cout << main_description << "\n"; std::cout << external_description << "\n"; @@ -2696,12 +2574,12 @@ public: } } - context.makeGlobalContext(); - context.setSettings(cmd_settings); + context->makeGlobalContext(); + context->setSettings(cmd_settings); /// Copy settings-related program options to config. /// TODO: Is this code necessary? - for (const auto & setting : context.getSettingsRef().all()) + for (const auto & setting : context->getSettingsRef().all()) { const auto & name = setting.getName(); if (options.count(name)) @@ -2793,19 +2671,15 @@ public: { std::string traceparent = options["opentelemetry-traceparent"].as(); std::string error; - if (!context.getClientInfo().client_trace_context.parseTraceparentHeader( - traceparent, error)) + if (!context->getClientInfo().client_trace_context.parseTraceparentHeader(traceparent, error)) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Cannot parse OpenTelemetry traceparent '{}': {}", - traceparent, error); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot parse OpenTelemetry traceparent '{}': {}", traceparent, error); } } if (options.count("opentelemetry-tracestate")) { - context.getClientInfo().client_trace_context.tracestate = - options["opentelemetry-tracestate"].as(); + context->getClientInfo().client_trace_context.tracestate = options["opentelemetry-tracestate"].as(); } argsToConfig(common_arguments, config(), 100); @@ -2813,7 +2687,6 @@ public: clearPasswordFromCommandLine(argc, argv); } }; - } #pragma GCC diagnostic ignored "-Wunused-function" diff --git a/programs/client/ConnectionParameters.cpp b/programs/client/ConnectionParameters.cpp index 19734dd5ffa..6faf43759df 100644 --- a/programs/client/ConnectionParameters.cpp +++ b/programs/client/ConnectionParameters.cpp @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include #include @@ -60,7 +62,9 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati #endif } - compression = config.getBool("compression", true) ? Protocol::Compression::Enable : Protocol::Compression::Disable; + /// By default compression is disabled if address looks like localhost. + compression = config.getBool("compression", !isLocalAddress(DNSResolver::instance().resolveHost(host))) + ? 
Protocol::Compression::Enable : Protocol::Compression::Disable; timeouts = ConnectionTimeouts( Poco::Timespan(config.getInt("connect_timeout", DBMS_DEFAULT_CONNECT_TIMEOUT_SEC), 0), diff --git a/programs/client/QueryFuzzer.cpp b/programs/client/QueryFuzzer.cpp index 0c8dc0731f9..721e5acb991 100644 --- a/programs/client/QueryFuzzer.cpp +++ b/programs/client/QueryFuzzer.cpp @@ -27,6 +27,7 @@ #include #include + namespace DB { @@ -37,34 +38,33 @@ namespace ErrorCodes Field QueryFuzzer::getRandomField(int type) { + static constexpr Int64 bad_int64_values[] + = {-2, -1, 0, 1, 2, 3, 7, 10, 100, 255, 256, 257, 1023, 1024, + 1025, 65535, 65536, 65537, 1024 * 1024 - 1, 1024 * 1024, + 1024 * 1024 + 1, INT_MIN - 1ll, INT_MIN, INT_MIN + 1, + INT_MAX - 1, INT_MAX, INT_MAX + 1ll, INT64_MIN, INT64_MIN + 1, + INT64_MAX - 1, INT64_MAX}; switch (type) { case 0: { - static constexpr Int64 values[] - = {-2, -1, 0, 1, 2, 3, 7, 10, 100, 255, 256, 257, 1023, 1024, - 1025, 65535, 65536, 65537, 1024 * 1024 - 1, 1024 * 1024, - 1024 * 1024 + 1, INT64_MIN, INT64_MAX}; - return values[fuzz_rand() % (sizeof(values) / sizeof(*values))]; + return bad_int64_values[fuzz_rand() % (sizeof(bad_int64_values) + / sizeof(*bad_int64_values))]; } case 1: { static constexpr float values[] - = {NAN, INFINITY, -INFINITY, 0., 0.0001, 0.5, 0.9999, - 1., 1.0001, 2., 10.0001, 100.0001, 1000.0001}; - return values[fuzz_rand() % (sizeof(values) / sizeof(*values))]; + = {NAN, INFINITY, -INFINITY, 0., -0., 0.0001, 0.5, 0.9999, + 1., 1.0001, 2., 10.0001, 100.0001, 1000.0001, 1e10, 1e20, + FLT_MIN, FLT_MIN + FLT_EPSILON, FLT_MAX, FLT_MAX + FLT_EPSILON}; return values[fuzz_rand() % (sizeof(values) / sizeof(*values))]; } case 2: { - static constexpr Int64 values[] - = {-2, -1, 0, 1, 2, 3, 7, 10, 100, 255, 256, 257, 1023, 1024, - 1025, 65535, 65536, 65537, 1024 * 1024 - 1, 1024 * 1024, - 1024 * 1024 + 1, INT64_MIN, INT64_MAX}; static constexpr UInt64 scales[] = {0, 1, 2, 10}; return DecimalField( - values[fuzz_rand() % (sizeof(values) / sizeof(*values))], - scales[fuzz_rand() % (sizeof(scales) / sizeof(*scales))] - ); + bad_int64_values[fuzz_rand() % (sizeof(bad_int64_values) + / sizeof(*bad_int64_values))], + scales[fuzz_rand() % (sizeof(scales) / sizeof(*scales))]); } default: assert(false); diff --git a/programs/client/QueryFuzzer.h b/programs/client/QueryFuzzer.h index 38714205967..7c79e683eb4 100644 --- a/programs/client/QueryFuzzer.h +++ b/programs/client/QueryFuzzer.h @@ -4,11 +4,14 @@ #include #include +#include + #include #include #include #include + namespace DB { @@ -50,7 +53,7 @@ struct QueryFuzzer // Some debug fields for detecting problematic ASTs with loops. // These are reset for each fuzzMain call. 
std::unordered_set debug_visited_nodes; - ASTPtr * debug_top_ast; + ASTPtr * debug_top_ast = nullptr; // This is the only function you have to call -- it will modify the passed diff --git a/programs/client/Suggest.cpp b/programs/client/Suggest.cpp index dfa7048349e..8d4c0fdbd5a 100644 --- a/programs/client/Suggest.cpp +++ b/programs/client/Suggest.cpp @@ -108,14 +108,6 @@ void Suggest::loadImpl(Connection & connection, const ConnectionTimeouts & timeo " UNION ALL " "SELECT cluster FROM system.clusters" " UNION ALL " - "SELECT name FROM system.errors" - " UNION ALL " - "SELECT event FROM system.events" - " UNION ALL " - "SELECT metric FROM system.asynchronous_metrics" - " UNION ALL " - "SELECT metric FROM system.metrics" - " UNION ALL " "SELECT macro FROM system.macros" " UNION ALL " "SELECT policy_name FROM system.storage_policies" @@ -139,17 +131,12 @@ void Suggest::loadImpl(Connection & connection, const ConnectionTimeouts & timeo query << ") WHERE notEmpty(res)"; - Settings settings; - /// To show all rows from: - /// - system.errors - /// - system.events - settings.system_events_show_zero_values = true; - fetch(connection, timeouts, query.str(), settings); + fetch(connection, timeouts, query.str()); } -void Suggest::fetch(Connection & connection, const ConnectionTimeouts & timeouts, const std::string & query, Settings & settings) +void Suggest::fetch(Connection & connection, const ConnectionTimeouts & timeouts, const std::string & query) { - connection.sendQuery(timeouts, query, "" /* query_id */, QueryProcessingStage::Complete, &settings); + connection.sendQuery(timeouts, query, "" /* query_id */, QueryProcessingStage::Complete); while (true) { diff --git a/programs/client/Suggest.h b/programs/client/Suggest.h index 0049bc08ebf..03332088cbe 100644 --- a/programs/client/Suggest.h +++ b/programs/client/Suggest.h @@ -33,7 +33,7 @@ public: private: void loadImpl(Connection & connection, const ConnectionTimeouts & timeouts, size_t suggestion_limit); - void fetch(Connection & connection, const ConnectionTimeouts & timeouts, const std::string & query, Settings & settings); + void fetch(Connection & connection, const ConnectionTimeouts & timeouts, const std::string & query); void fillWordsFromBlock(const Block & block); /// Words are fetched asynchronously. 
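As the comment above notes, completion words are fetched asynchronously: the client starts the interactive prompt immediately while a background thread runs the big `UNION ALL` query over the system tables and fills the dictionary. Below is a stripped-down model of that pattern, with the network fetch replaced by a stub word list; all names here are hypothetical, not the real `Suggest` API.

```cpp
#include <chrono>
#include <iostream>
#include <mutex>
#include <string>
#include <thread>
#include <vector>

// Background loading of completion words: lookups simply see whatever has
// arrived so far, so the prompt is usable before loading finishes. The fetch
// is a stub standing in for Connection::sendQuery + receiving result blocks.
class ToySuggest
{
public:
    void loadAsync()
    {
        loader = std::thread([this]
        {
            for (const char * word : {"SELECT", "SETTINGS", "system.clusters"})
            {
                std::this_thread::sleep_for(std::chrono::milliseconds(10));
                std::lock_guard lock(mutex);
                words.push_back(word);
            }
        });
    }

    std::vector<std::string> complete(const std::string & prefix)
    {
        std::lock_guard lock(mutex);
        std::vector<std::string> out;
        for (const auto & w : words)
            if (w.rfind(prefix, 0) == 0) // starts_with
                out.push_back(w);
        return out;
    }

    ~ToySuggest()
    {
        if (loader.joinable())
            loader.join();
    }

private:
    std::thread loader;
    std::mutex mutex;
    std::vector<std::string> words;
};

int main()
{
    ToySuggest suggest;
    suggest.loadAsync();
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    for (const auto & w : suggest.complete("SE"))
        std::cout << w << '\n';
}
```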
diff --git a/programs/config_tools.h.in b/programs/config_tools.h.in index 7cb5a6d883a..50ba0c16a83 100644 --- a/programs/config_tools.h.in +++ b/programs/config_tools.h.in @@ -15,3 +15,5 @@ #cmakedefine01 ENABLE_CLICKHOUSE_GIT_IMPORT #cmakedefine01 ENABLE_CLICKHOUSE_INSTALL #cmakedefine01 ENABLE_CLICKHOUSE_ODBC_BRIDGE +#cmakedefine01 ENABLE_CLICKHOUSE_LIBRARY_BRIDGE +#cmakedefine01 ENABLE_CLICKHOUSE_KEEPER diff --git a/programs/copier/CMakeLists.txt b/programs/copier/CMakeLists.txt index f69b30f3f43..dfb067b00f9 100644 --- a/programs/copier/CMakeLists.txt +++ b/programs/copier/CMakeLists.txt @@ -1,7 +1,7 @@ set(CLICKHOUSE_COPIER_SOURCES - ${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopierApp.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopier.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/Internals.cpp) + "${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopierApp.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopier.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/Internals.cpp") set (CLICKHOUSE_COPIER_LINK PRIVATE diff --git a/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp index 2b19a401206..a60896388a0 100644 --- a/programs/copier/ClusterCopier.cpp +++ b/programs/copier/ClusterCopier.cpp @@ -22,7 +22,7 @@ namespace ErrorCodes void ClusterCopier::init() { - auto zookeeper = context.getZooKeeper(); + auto zookeeper = getContext()->getZooKeeper(); task_description_watch_callback = [this] (const Coordination::WatchResponse & response) { @@ -39,14 +39,14 @@ void ClusterCopier::init() task_cluster_initial_config = task_cluster_current_config; task_cluster->loadTasks(*task_cluster_initial_config); - context.setClustersConfig(task_cluster_initial_config, task_cluster->clusters_prefix); + getContext()->setClustersConfig(task_cluster_initial_config, task_cluster->clusters_prefix); /// Set up shards and their priority task_cluster->random_engine.seed(task_cluster->random_device()); for (auto & task_table : task_cluster->table_tasks) { - task_table.cluster_pull = context.getCluster(task_table.cluster_pull_name); - task_table.cluster_push = context.getCluster(task_table.cluster_push_name); + task_table.cluster_pull = getContext()->getCluster(task_table.cluster_pull_name); + task_table.cluster_push = getContext()->getCluster(task_table.cluster_push_name); task_table.initShards(task_cluster->random_engine); } @@ -206,7 +206,7 @@ void ClusterCopier::uploadTaskDescription(const std::string & task_path, const s if (task_config_str.empty()) return; - auto zookeeper = context.getZooKeeper(); + auto zookeeper = getContext()->getZooKeeper(); zookeeper->createAncestors(local_task_description_path); auto code = zookeeper->tryCreate(local_task_description_path, task_config_str, zkutil::CreateMode::Persistent); @@ -219,7 +219,7 @@ void ClusterCopier::uploadTaskDescription(const std::string & task_path, const s void ClusterCopier::reloadTaskDescription() { - auto zookeeper = context.getZooKeeper(); + auto zookeeper = getContext()->getZooKeeper(); task_description_watch_zookeeper = zookeeper; String task_config_str; @@ -235,7 +235,7 @@ void ClusterCopier::reloadTaskDescription() /// Setup settings task_cluster->reloadSettings(*config); - context.setSettings(task_cluster->settings_common); + getContext()->setSettings(task_cluster->settings_common); task_cluster_current_config = config; task_description_current_stat = stat; @@ -440,7 +440,7 @@ bool ClusterCopier::checkPartitionPieceIsDone(const TaskTable & task_table, cons { LOG_DEBUG(log, "Check that all shards processed partition {} piece {} successfully", partition_name, piece_number); - auto 
zookeeper = context.getZooKeeper(); + auto zookeeper = getContext()->getZooKeeper(); /// Collect all shards that contain partition piece number piece_number. Strings piece_status_paths; @@ -532,7 +532,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t LOG_DEBUG(log, "Try to move {} to destination table", partition_name); - auto zookeeper = context.getZooKeeper(); + auto zookeeper = getContext()->getZooKeeper(); const auto current_partition_attach_is_active = task_table.getPartitionAttachIsActivePath(partition_name); const auto current_partition_attach_is_done = task_table.getPartitionAttachIsDonePath(partition_name); @@ -599,11 +599,13 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t toString(current_piece_number)); Settings settings_push = task_cluster->settings_push; - - /// It is important, ALTER ATTACH PARTITION must be done synchronously - /// And we will execute this ALTER query on each replica of a shard. - /// It is correct, because this query is idempotent. - settings_push.replication_alter_partitions_sync = 2; + ClusterExecutionMode execution_mode = ClusterExecutionMode::ON_EACH_NODE; + UInt64 max_successful_executions_per_shard = 0; + if (settings_push.replication_alter_partitions_sync == 1) + { + execution_mode = ClusterExecutionMode::ON_EACH_SHARD; + max_successful_executions_per_shard = 1; + } query_alter_ast_string += " ALTER TABLE " + getQuotedTable(original_table) + ((partition_name == "'all'") ? " ATTACH PARTITION ID " : " ATTACH PARTITION ") + partition_name + @@ -613,14 +615,33 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t try { - size_t num_nodes = executeQueryOnCluster( - task_table.cluster_push, - query_alter_ast_string, - settings_push, - PoolMode::GET_MANY, - ClusterExecutionMode::ON_EACH_NODE); + /// Try attach partition on each shard + UInt64 num_nodes = executeQueryOnCluster( + task_table.cluster_push, + query_alter_ast_string, + task_cluster->settings_push, + PoolMode::GET_MANY, + execution_mode, + max_successful_executions_per_shard); - LOG_INFO(log, "Number of nodes that executed ALTER query successfully : {}", toString(num_nodes)); + if (settings_push.replication_alter_partitions_sync == 1) + { + LOG_INFO( + log, + "Destination tables {} have been executed alter query successfully on {} shards of {}", + getQuotedTable(task_table.table_push), + num_nodes, + task_table.cluster_push->getShardCount()); + + if (num_nodes != task_table.cluster_push->getShardCount()) + { + return TaskStatus::Error; + } + } + else + { + LOG_INFO(log, "Number of nodes that executed ALTER query successfully : {}", toString(num_nodes)); + } } catch (...) 
{ @@ -693,6 +714,8 @@ ASTPtr ClusterCopier::removeAliasColumnsFromCreateQuery(const ASTPtr & query_ast new_columns_list->set(new_columns_list->columns, new_columns); if (const auto * indices = query_ast->as<ASTCreateQuery>()->columns_list->indices) new_columns_list->set(new_columns_list->indices, indices->clone()); + if (const auto * projections = query_ast->as<ASTCreateQuery>()->columns_list->projections) + new_columns_list->set(new_columns_list->projections, projections->clone()); new_query.replace(new_query.columns_list, new_columns_list); @@ -856,6 +879,16 @@ bool ClusterCopier::tryDropPartitionPiece( bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTable & task_table) { + /// Create destination table + TaskStatus task_status = TaskStatus::Error; + + task_status = tryCreateDestinationTable(timeouts, task_table); + /// Exit if the destination table could not be created + if (task_status != TaskStatus::Finished) + { + LOG_WARNING(log, "Creating the destination table failed"); + return false; + } /// An heuristic: if previous shard is already done, then check next one without sleeps due to max_workers constraint bool previous_shard_is_instantly_finished = false; @@ -932,7 +965,7 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab /// Do not sleep if there is a sequence of already processed shards to increase startup bool is_unprioritized_task = !previous_shard_is_instantly_finished && shard->priority.is_remote; - TaskStatus task_status = TaskStatus::Error; + task_status = TaskStatus::Error; bool was_error = false; has_shard_to_process = true; for (UInt64 try_num = 0; try_num < max_shard_partition_tries; ++try_num) @@ -1050,6 +1083,44 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab return table_is_done; } +TaskStatus ClusterCopier::tryCreateDestinationTable(const ConnectionTimeouts & timeouts, TaskTable & task_table) +{ + /// Try to create the original table (if not exists) on each shard + + //TaskTable & task_table = task_shard.task_table; + const TaskShardPtr task_shard = task_table.all_shards.at(0); + /// We need to update table definitions for each part, it could be changed after ALTER + task_shard->current_pull_table_create_query = getCreateTableForPullShard(timeouts, *task_shard); + try + { + auto create_query_push_ast + = rewriteCreateQueryStorage(task_shard->current_pull_table_create_query, task_table.table_push, task_table.engine_push_ast); + auto & create = create_query_push_ast->as<ASTCreateQuery &>(); + create.if_not_exists = true; + InterpreterCreateQuery::prepareOnClusterQuery(create, getContext(), task_table.cluster_push_name); + String query = queryToString(create_query_push_ast); + + LOG_DEBUG(log, "Create destination tables. Query: {}", query); + UInt64 shards = executeQueryOnCluster(task_table.cluster_push, query, task_cluster->settings_push, PoolMode::GET_MANY); + LOG_INFO( + log, + "Destination tables {} have been created on {} shards of {}", + getQuotedTable(task_table.table_push), + shards, + task_table.cluster_push->getShardCount()); + if (shards != task_table.cluster_push->getShardCount()) + { + return TaskStatus::Error; + } + } + catch (...) + { + tryLogCurrentException(log, "Error while creating original table. Maybe we are not first."); + } + + return TaskStatus::Finished; +} + /// Job for copying partition from particular shard. 
TaskStatus ClusterCopier::tryProcessPartitionTask(const ConnectionTimeouts & timeouts, ShardPartition & task_partition, bool is_unprioritized_task) { @@ -1142,7 +1213,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( auto split_table_for_current_piece = task_shard.list_of_split_tables_on_shard[current_piece_number]; - auto zookeeper = context.getZooKeeper(); + auto zookeeper = getContext()->getZooKeeper(); const String piece_is_dirty_flag_path = partition_piece.getPartitionPieceIsDirtyPath(); const String piece_is_dirty_cleaned_path = partition_piece.getPartitionPieceIsCleanedPath(); @@ -1193,7 +1264,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( ParserQuery p_query(query.data() + query.size()); - const auto & settings = context.getSettingsRef(); + const auto & settings = getContext()->getSettingsRef(); return parseQuery(p_query, query, settings.max_query_size, settings.max_parser_depth); }; @@ -1297,10 +1368,10 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( ASTPtr query_select_ast = get_select_query(split_table_for_current_piece, "count()", /*enable_splitting*/ true); UInt64 count; { - Context local_context = context; + auto local_context = Context::createCopy(context); // Use pull (i.e. readonly) settings, but fetch data from destination servers - local_context.setSettings(task_cluster->settings_pull); - local_context.setSetting("skip_unavailable_shards", true); + local_context->setSettings(task_cluster->settings_pull); + local_context->setSetting("skip_unavailable_shards", true); Block block = getBlockWithAllStreamData(InterpreterFactory::get(query_select_ast, local_context)->execute().getInputStream()); count = (block) ? block.safeGetByPosition(0).column->getUInt(0) : 0; @@ -1366,8 +1437,17 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( LOG_DEBUG(log, "Create destination tables. 
Query: {}", query); UInt64 shards = executeQueryOnCluster(task_table.cluster_push, query, task_cluster->settings_push, PoolMode::GET_MANY); - LOG_DEBUG(log, "Destination tables {} have been created on {} shards of {}", - getQuotedTable(task_table.table_push), shards, task_table.cluster_push->getShardCount()); + LOG_INFO( + log, + "Destination tables {} have been created on {} shards of {}", + getQuotedTable(task_table.table_push), + shards, + task_table.cluster_push->getShardCount()); + + if (shards != task_table.cluster_push->getShardCount()) + { + return TaskStatus::Error; + } } /// Do the copying @@ -1390,7 +1470,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( query += "INSERT INTO " + getQuotedTable(split_table_for_current_piece) + " VALUES "; ParserQuery p_query(query.data() + query.size()); - const auto & settings = context.getSettingsRef(); + const auto & settings = getContext()->getSettingsRef(); query_insert_ast = parseQuery(p_query, query, settings.max_query_size, settings.max_parser_depth); LOG_DEBUG(log, "Executing INSERT query: {}", query); @@ -1398,18 +1478,18 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( try { - std::unique_ptr context_select = std::make_unique(context); + auto context_select = Context::createCopy(context); context_select->setSettings(task_cluster->settings_pull); - std::unique_ptr context_insert = std::make_unique(context); + auto context_insert = Context::createCopy(context); context_insert->setSettings(task_cluster->settings_push); /// Custom INSERT SELECT implementation BlockInputStreamPtr input; BlockOutputStreamPtr output; { - BlockIO io_select = InterpreterFactory::get(query_select_ast, *context_select)->execute(); - BlockIO io_insert = InterpreterFactory::get(query_insert_ast, *context_insert)->execute(); + BlockIO io_select = InterpreterFactory::get(query_select_ast, context_select)->execute(); + BlockIO io_insert = InterpreterFactory::get(query_insert_ast, context_insert)->execute(); input = io_select.getInputStream(); output = io_insert.out; @@ -1477,26 +1557,6 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( LOG_INFO(log, "Partition {} piece {} copied. But not moved to original destination table.", task_partition.name, toString(current_piece_number)); - - /// Try create original table (if not exists) on each shard - try - { - auto create_query_push_ast = rewriteCreateQueryStorage(task_shard.current_pull_table_create_query, - task_table.table_push, task_table.engine_push_ast); - auto & create = create_query_push_ast->as(); - create.if_not_exists = true; - InterpreterCreateQuery::prepareOnClusterQuery(create, context, task_table.cluster_push_name); - String query = queryToString(create_query_push_ast); - - LOG_DEBUG(log, "Create destination tables. Query: {}", query); - UInt64 shards = executeQueryOnCluster(task_table.cluster_push, query, task_cluster->settings_push, PoolMode::GET_MANY); - LOG_DEBUG(log, "Destination tables {} have been created on {} shards of {}", getQuotedTable(task_table.table_push), shards, task_table.cluster_push->getShardCount()); - } - catch (...) - { - tryLogCurrentException(log, "Error while creating original table. 
Maybe we are not first."); - } - /// Finalize the processing, change state of current partition task (and also check is_dirty flag) { String state_finished = TaskStateWithOwner::getData(TaskState::Finished, host_id); @@ -1523,7 +1583,7 @@ void ClusterCopier::dropAndCreateLocalTable(const ASTPtr & create_ast) const auto & create = create_ast->as(); dropLocalTableIfExists({create.database, create.table}); - InterpreterCreateQuery interpreter(create_ast, context); + InterpreterCreateQuery interpreter(create_ast, getContext()); interpreter.execute(); } @@ -1534,37 +1594,40 @@ void ClusterCopier::dropLocalTableIfExists(const DatabaseAndTableName & table_na drop_ast->database = table_name.first; drop_ast->table = table_name.second; - InterpreterDropQuery interpreter(drop_ast, context); + InterpreterDropQuery interpreter(drop_ast, getContext()); interpreter.execute(); } +void ClusterCopier::dropHelpingTablesByPieceNumber(const TaskTable & task_table, size_t current_piece_number) +{ + LOG_DEBUG(log, "Removing helping tables piece {}", current_piece_number); + + DatabaseAndTableName original_table = task_table.table_push; + DatabaseAndTableName helping_table + = DatabaseAndTableName(original_table.first, original_table.second + "_piece_" + toString(current_piece_number)); + + String query = "DROP TABLE IF EXISTS " + getQuotedTable(helping_table); + + const ClusterPtr & cluster_push = task_table.cluster_push; + Settings settings_push = task_cluster->settings_push; + + LOG_DEBUG(log, "Execute distributed DROP TABLE: {}", query); + + /// We have to drop partition_piece on each replica + UInt64 num_nodes = executeQueryOnCluster(cluster_push, query, settings_push, PoolMode::GET_MANY, ClusterExecutionMode::ON_EACH_NODE); + + LOG_INFO(log, "DROP TABLE query was successfully executed on {} nodes.", toString(num_nodes)); +} void ClusterCopier::dropHelpingTables(const TaskTable & task_table) { LOG_DEBUG(log, "Removing helping tables"); for (size_t current_piece_number = 0; current_piece_number < task_table.number_of_splits; ++current_piece_number) { - DatabaseAndTableName original_table = task_table.table_push; - DatabaseAndTableName helping_table = DatabaseAndTableName(original_table.first, original_table.second + "_piece_" + toString(current_piece_number)); - - String query = "DROP TABLE IF EXISTS " + getQuotedTable(helping_table); - - const ClusterPtr & cluster_push = task_table.cluster_push; - Settings settings_push = task_cluster->settings_push; - - LOG_DEBUG(log, "Execute distributed DROP TABLE: {}", query); - /// We have to drop partition_piece on each replica - UInt64 num_nodes = executeQueryOnCluster( - cluster_push, query, - settings_push, - PoolMode::GET_MANY, - ClusterExecutionMode::ON_EACH_NODE); - - LOG_DEBUG(log, "DROP TABLE query was successfully executed on {} nodes.", toString(num_nodes)); + dropHelpingTablesByPieceNumber(task_table, current_piece_number); } } - void ClusterCopier::dropParticularPartitionPieceFromAllHelpingTables(const TaskTable & task_table, const String & partition_name) { LOG_DEBUG(log, "Try drop partition partition from all helping tables."); @@ -1586,15 +1649,15 @@ void ClusterCopier::dropParticularPartitionPieceFromAllHelpingTables(const TaskT PoolMode::GET_MANY, ClusterExecutionMode::ON_EACH_NODE); - LOG_DEBUG(log, "DROP PARTITION query was successfully executed on {} nodes.", toString(num_nodes)); + LOG_INFO(log, "DROP PARTITION query was successfully executed on {} nodes.", toString(num_nodes)); } LOG_DEBUG(log, "All helping tables dropped partition {}", 
partition_name); } String ClusterCopier::getRemoteCreateTable(const DatabaseAndTableName & table, Connection & connection, const Settings & settings) { - Context remote_context(context); - remote_context.setSettings(settings); + auto remote_context = Context::createCopy(context); + remote_context->setSettings(settings); String query = "SHOW CREATE TABLE " + getQuotedTable(table); Block block = getBlockWithAllStreamData(std::make_shared( @@ -1613,7 +1676,7 @@ ASTPtr ClusterCopier::getCreateTableForPullShard(const ConnectionTimeouts & time task_cluster->settings_pull); ParserCreateQuery parser_create_query; - const auto & settings = context.getSettingsRef(); + const auto & settings = getContext()->getSettingsRef(); return parseQuery(parser_create_query, create_query_pull_str, settings.max_query_size, settings.max_parser_depth); } @@ -1642,7 +1705,7 @@ void ClusterCopier::createShardInternalTables(const ConnectionTimeouts & timeout /// Create special cluster with single shard String shard_read_cluster_name = read_shard_prefix + task_table.cluster_pull_name; ClusterPtr cluster_pull_current_shard = task_table.cluster_pull->getClusterWithSingleShard(task_shard.indexInCluster()); - context.setCluster(shard_read_cluster_name, cluster_pull_current_shard); + getContext()->setCluster(shard_read_cluster_name, cluster_pull_current_shard); auto storage_shard_ast = createASTStorageDistributed(shard_read_cluster_name, task_table.table_pull.first, task_table.table_pull.second); @@ -1702,13 +1765,13 @@ std::set ClusterCopier::getShardPartitions(const ConnectionTimeouts & ti } ParserQuery parser_query(query.data() + query.size()); - const auto & settings = context.getSettingsRef(); + const auto & settings = getContext()->getSettingsRef(); ASTPtr query_ast = parseQuery(parser_query, query, settings.max_query_size, settings.max_parser_depth); LOG_DEBUG(log, "Computing destination partition set, executing query: {}", query); - Context local_context = context; - local_context.setSettings(task_cluster->settings_pull); + auto local_context = Context::createCopy(context); + local_context->setSettings(task_cluster->settings_pull); Block block = getBlockWithAllStreamData(InterpreterFactory::get(query_ast, local_context)->execute().getInputStream()); if (block) @@ -1748,11 +1811,11 @@ bool ClusterCopier::checkShardHasPartition(const ConnectionTimeouts & timeouts, LOG_DEBUG(log, "Checking shard {} for partition {} existence, executing query: {}", task_shard.getDescription(), partition_quoted_name, query); ParserQuery parser_query(query.data() + query.size()); -const auto & settings = context.getSettingsRef(); + const auto & settings = getContext()->getSettingsRef(); ASTPtr query_ast = parseQuery(parser_query, query, settings.max_query_size, settings.max_parser_depth); - Context local_context = context; - local_context.setSettings(task_cluster->settings_pull); + auto local_context = Context::createCopy(context); + local_context->setSettings(task_cluster->settings_pull); return InterpreterFactory::get(query_ast, local_context)->execute().getInputStream()->read().rows() != 0; } @@ -1787,11 +1850,11 @@ bool ClusterCopier::checkPresentPartitionPiecesOnCurrentShard(const ConnectionTi LOG_DEBUG(log, "Checking shard {} for partition {} piece {} existence, executing query: {}", task_shard.getDescription(), partition_quoted_name, std::to_string(current_piece_number), query); ParserQuery parser_query(query.data() + query.size()); - const auto & settings = context.getSettingsRef(); + const auto & settings = 
getContext()->getSettingsRef(); ASTPtr query_ast = parseQuery(parser_query, query, settings.max_query_size, settings.max_parser_depth); - Context local_context = context; - local_context.setSettings(task_cluster->settings_pull); + auto local_context = Context::createCopy(context); + local_context->setSettings(task_cluster->settings_pull); auto result = InterpreterFactory::get(query_ast, local_context)->execute().getInputStream()->read().rows(); if (result != 0) LOG_DEBUG(log, "Partition {} piece number {} is PRESENT on shard {}", partition_quoted_name, std::to_string(current_piece_number), task_shard.getDescription()); @@ -1847,7 +1910,7 @@ UInt64 ClusterCopier::executeQueryOnCluster( /// In that case we don't have local replicas, but do it just in case for (UInt64 i = 0; i < num_local_replicas; ++i) { - auto interpreter = InterpreterFactory::get(query_ast, context); + auto interpreter = InterpreterFactory::get(query_ast, getContext()); interpreter->execute(); if (increment_and_check_exit()) @@ -1862,8 +1925,8 @@ UInt64 ClusterCopier::executeQueryOnCluster( auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(shard_settings).getSaturated(shard_settings.max_execution_time); auto connections = shard.pool->getMany(timeouts, &shard_settings, pool_mode); - Context shard_context(context); - shard_context.setSettings(shard_settings); + auto shard_context = Context::createCopy(context); + shard_context->setSettings(shard_settings); for (auto & connection : connections) { diff --git a/programs/copier/ClusterCopier.h b/programs/copier/ClusterCopier.h index 9aff5493cf8..085fa2ece06 100644 --- a/programs/copier/ClusterCopier.h +++ b/programs/copier/ClusterCopier.h @@ -12,18 +12,17 @@ namespace DB { -class ClusterCopier +class ClusterCopier : WithMutableContext { public: ClusterCopier(const String & task_path_, const String & host_id_, const String & proxy_database_name_, - Context & context_) - : + ContextMutablePtr context_) + : WithMutableContext(context_), task_zookeeper_path(task_path_), host_id(host_id_), working_database_name(proxy_database_name_), - context(context_), log(&Poco::Logger::get("ClusterCopier")) {} void init(); @@ -36,7 +35,7 @@ public: /// Compute set of partitions, assume set of partitions aren't changed during the processing void discoverTablePartitions(const ConnectionTimeouts & timeouts, TaskTable & task_table, UInt64 num_threads = 0); - void uploadTaskDescription(const std::string & task_path, const std::string & task_file, const bool force); + void uploadTaskDescription(const std::string & task_path, const std::string & task_file, bool force); void reloadTaskDescription(); @@ -120,15 +119,16 @@ protected: /// Removes MATERIALIZED and ALIAS columns from create table query static ASTPtr removeAliasColumnsFromCreateQuery(const ASTPtr & query_ast); - bool tryDropPartitionPiece(ShardPartition & task_partition, const size_t current_piece_number, + bool tryDropPartitionPiece(ShardPartition & task_partition, size_t current_piece_number, const zkutil::ZooKeeperPtr & zookeeper, const CleanStateClock & clean_state_clock); - static constexpr UInt64 max_table_tries = 1000; - static constexpr UInt64 max_shard_partition_tries = 600; - static constexpr UInt64 max_shard_partition_piece_tries_for_alter = 100; + static constexpr UInt64 max_table_tries = 3; + static constexpr UInt64 max_shard_partition_tries = 3; + static constexpr UInt64 max_shard_partition_piece_tries_for_alter = 3; bool tryProcessTable(const ConnectionTimeouts & timeouts, TaskTable & task_table); + TaskStatus 
tryCreateDestinationTable(const ConnectionTimeouts & timeouts, TaskTable & task_table); /// Job for copying partition from particular shard. TaskStatus tryProcessPartitionTask(const ConnectionTimeouts & timeouts, ShardPartition & task_partition, @@ -140,7 +140,7 @@ protected: TaskStatus processPartitionPieceTaskImpl(const ConnectionTimeouts & timeouts, ShardPartition & task_partition, - const size_t current_piece_number, + size_t current_piece_number, bool is_unprioritized_task); void dropAndCreateLocalTable(const ASTPtr & create_ast); @@ -149,6 +149,8 @@ protected: void dropHelpingTables(const TaskTable & task_table); + void dropHelpingTablesByPieceNumber(const TaskTable & task_table, size_t current_piece_number); + /// Is used to save disk space. /// After all pieces were successfully moved to original destination /// table we can get rid of partition pieces (partitions in helping tables). @@ -216,7 +218,6 @@ private: bool experimental_use_sample_offset{false}; - Context & context; Poco::Logger * log; std::chrono::milliseconds default_sleep_time{1000}; diff --git a/programs/copier/ClusterCopierApp.cpp b/programs/copier/ClusterCopierApp.cpp index e3169a49ecf..8925ab63f99 100644 --- a/programs/copier/ClusterCopierApp.cpp +++ b/programs/copier/ClusterCopierApp.cpp @@ -3,8 +3,11 @@ #include #include #include +#include #include +#include +namespace fs = std::filesystem; namespace DB { @@ -25,7 +28,7 @@ void ClusterCopierApp::initialize(Poco::Util::Application & self) copy_fault_probability = std::max(std::min(config().getDouble("copy-fault-probability"), 1.0), 0.0); if (config().has("move-fault-probability")) move_fault_probability = std::max(std::min(config().getDouble("move-fault-probability"), 1.0), 0.0); - base_dir = (config().has("base-dir")) ? config().getString("base-dir") : Poco::Path::current(); + base_dir = (config().has("base-dir")) ? 
config().getString("base-dir") : fs::current_path().string(); if (config().has("experimental-use-sample-offset")) @@ -37,18 +40,18 @@ void ClusterCopierApp::initialize(Poco::Util::Application & self) process_id = std::to_string(DateLUT::instance().toNumYYYYMMDDhhmmss(timestamp)) + "_" + std::to_string(curr_pid); host_id = escapeForFileName(getFQDNOrHostName()) + '#' + process_id; - process_path = Poco::Path(base_dir + "/clickhouse-copier_" + process_id).absolute().toString(); - Poco::File(process_path).createDirectories(); + process_path = fs::weakly_canonical(fs::path(base_dir) / ("clickhouse-copier_" + process_id)); + fs::create_directories(process_path); /// Override variables for BaseDaemon if (config().has("log-level")) config().setString("logger.level", config().getString("log-level")); if (config().has("base-dir") || !config().has("logger.log")) - config().setString("logger.log", process_path + "/log.log"); + config().setString("logger.log", fs::path(process_path) / "log.log"); if (config().has("base-dir") || !config().has("logger.errorlog")) - config().setString("logger.errorlog", process_path + "/log.err.log"); + config().setString("logger.errorlog", fs::path(process_path) / "log.err.log"); Base::initialize(self); } @@ -110,9 +113,9 @@ void ClusterCopierApp::mainImpl() LOG_INFO(log, "Starting clickhouse-copier (id {}, host_id {}, path {}, revision {})", process_id, host_id, process_path, ClickHouseRevision::getVersionRevision()); SharedContextHolder shared_context = Context::createShared(); - auto context = std::make_unique(Context::createGlobal(shared_context.get())); + auto context = Context::createGlobal(shared_context.get()); context->makeGlobalContext(); - SCOPE_EXIT(context->shutdown()); + SCOPE_EXIT_SAFE(context->shutdown()); context->setConfig(loaded_config.configuration); context->setApplicationType(Context::ApplicationType::LOCAL); @@ -127,13 +130,13 @@ void ClusterCopierApp::mainImpl() registerFormats(); static const std::string default_database = "_local"; - DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared(default_database, *context)); + DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared(default_database, context)); context->setCurrentDatabase(default_database); /// Initialize query scope just in case. 
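The recurring pattern in this refactoring is visible in the hunks above and below: a raw `Context &` (or a copy-constructed `Context`) is replaced by a shared pointer (`ContextPtr` / `ContextMutablePtr`), and per-task settings overrides are applied to an explicit copy made with `Context::createCopy`. Isolated, the pattern looks like this minimal sketch (the helper name and signature are illustrative, not part of the diff; `Context::createCopy`, `setSettings` and the pull/push settings come straight from the hunks):

```
/// Sketch of the copied-context pattern used throughout this diff: never mutate
/// the caller's context; copy it, then override settings only on the copy.
ContextMutablePtr makePullContext(ContextPtr base, const Settings & settings_pull)
{
    auto copy = Context::createCopy(base);   /// replaces the old `Context local_context = context;`
    copy->setSettings(settings_pull);        /// override settings on the copy only
    return copy;
}
```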
- CurrentThread::QueryScope query_scope(*context); + CurrentThread::QueryScope query_scope(context); - auto copier = std::make_unique(task_path, host_id, default_database, *context); + auto copier = std::make_unique(task_path, host_id, default_database, context); copier->setSafeMode(is_safe_mode); copier->setCopyFaultProbability(copy_fault_probability); copier->setMoveFaultProbability(move_fault_probability); diff --git a/programs/copier/Internals.cpp b/programs/copier/Internals.cpp index ea2be469945..bec612a8226 100644 --- a/programs/copier/Internals.cpp +++ b/programs/copier/Internals.cpp @@ -222,8 +222,8 @@ Names extractPrimaryKeyColumnNames(const ASTPtr & storage_ast) { String pk_column = primary_key_expr_list->children[i]->getColumnName(); if (pk_column != sorting_key_column) - throw Exception("Primary key must be a prefix of the sorting key, but in position " - + toString(i) + " its column is " + pk_column + ", not " + sorting_key_column, + throw Exception("Primary key must be a prefix of the sorting key, but the column at position " + + toString(i) + " is " + sorting_key_column + ", not " + pk_column, ErrorCodes::BAD_ARGUMENTS); if (!primary_key_columns_set.emplace(pk_column).second) diff --git a/programs/copier/Internals.h b/programs/copier/Internals.h index 7e45c0ea2ee..9e40d7ebd7b 100644 --- a/programs/copier/Internals.h +++ b/programs/copier/Internals.h @@ -8,7 +8,6 @@ #include #include #include -#include #include #include #include diff --git a/programs/copier/TaskCluster.h b/programs/copier/TaskCluster.h index 5b28f461dd8..1a50597d07f 100644 --- a/programs/copier/TaskCluster.h +++ b/programs/copier/TaskCluster.h @@ -98,6 +98,7 @@ inline void DB::TaskCluster::reloadSettings(const Poco::Util::AbstractConfigurat set_default_value(settings_pull.max_block_size, 8192UL); set_default_value(settings_pull.preferred_block_size_bytes, 0); set_default_value(settings_push.insert_distributed_timeout, 0); + set_default_value(settings_push.replication_alter_partitions_sync, 2); } } diff --git a/programs/format/Format.cpp b/programs/format/Format.cpp index ba3d6e8557b..5bf19191353 100644 --- a/programs/format/Format.cpp +++ b/programs/format/Format.cpp @@ -102,8 +102,8 @@ int mainEntryClickHouseFormat(int argc, char ** argv) } SharedContextHolder shared_context = Context::createShared(); - Context context = Context::createGlobal(shared_context.get()); - context.makeGlobalContext(); + auto context = Context::createGlobal(shared_context.get()); + context->makeGlobalContext(); registerFunctions(); registerAggregateFunctions(); diff --git a/programs/git-import/git-import.cpp b/programs/git-import/git-import.cpp index b07435dcf78..7977cfba79d 100644 --- a/programs/git-import/git-import.cpp +++ b/programs/git-import/git-import.cpp @@ -774,7 +774,7 @@ UInt128 diffHash(const CommitDiff & file_changes) } UInt128 hash_of_diff; - hasher.get128(hash_of_diff.low, hash_of_diff.high); + hasher.get128(hash_of_diff.items[0], hash_of_diff.items[1]); return hash_of_diff; } diff --git a/programs/install/Install.cpp b/programs/install/Install.cpp index ef72624e7ab..a7f566a78b8 100644 --- a/programs/install/Install.cpp +++ b/programs/install/Install.cpp @@ -71,6 +71,9 @@ namespace ErrorCodes } +/// ANSI escape sequences for intense color in terminal. 
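The two macros defined just below wrap a message in SGR escape sequences: `\033[1m` switches the terminal to bold/intense rendering and `\033[0m` resets all attributes. A self-contained sketch of the same idea, with nothing ClickHouse-specific in it:

```
#include <cstdio>

// "\033[" begins an ANSI SGR sequence; parameter 1 selects bold/intense, 0 resets.
#define HILITE "\033[1m"
#define END_HILITE "\033[0m"

int main()
{
    // Prints "important" in bold on terminals that honour SGR sequences.
    std::printf("This is " HILITE "important" END_HILITE ".\n");
    return 0;
}
```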
+#define HILITE "\033[1m" +#define END_HILITE "\033[0m" using namespace DB; namespace po = boost::program_options; @@ -285,7 +288,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv) bool is_symlink = fs::is_symlink(symlink_path); fs::path points_to; if (is_symlink) - points_to = fs::absolute(fs::read_symlink(symlink_path)); + points_to = fs::weakly_canonical(fs::read_symlink(symlink_path)); if (is_symlink && points_to == main_bin_path) { @@ -559,20 +562,32 @@ int mainEntryClickHouseInstall(int argc, char ** argv) bool stdin_is_a_tty = isatty(STDIN_FILENO); bool stdout_is_a_tty = isatty(STDOUT_FILENO); - bool is_interactive = stdin_is_a_tty && stdout_is_a_tty; + + /// dpkg or apt installers can ask for non-interactive work explicitly. + + const char * debian_frontend_var = getenv("DEBIAN_FRONTEND"); + bool noninteractive = debian_frontend_var && debian_frontend_var == std::string_view("noninteractive"); + + bool is_interactive = !noninteractive && stdin_is_a_tty && stdout_is_a_tty; + + /// We can ask for a password even if stdin is closed/redirected, as long as /dev/tty is available. + bool can_ask_password = !noninteractive && stdout_is_a_tty; if (has_password_for_default_user) { - fmt::print("Password for default user is already specified. To remind or reset, see {} and {}.\n", + fmt::print(HILITE "Password for default user is already specified. To remind or reset, see {} and {}." END_HILITE "\n", users_config_file.string(), users_d.string()); } - else if (!is_interactive) + else if (!can_ask_password) { - fmt::print("Password for default user is empty string. See {} and {} to change it.\n", + fmt::print(HILITE "Password for default user is empty string. See {} and {} to change it." END_HILITE "\n", users_config_file.string(), users_d.string()); } else { + /// NOTE: When installing a debian package with dpkg -i, stdin is not a terminal, but we are still able to enter a password. + /// The more sophisticated method with /dev/tty is used inside the `readpassphrase` function. + char buf[1000] = {}; std::string password; if (auto * result = readpassphrase("Enter password for default user: ", buf, sizeof(buf), 0)) @@ -600,7 +615,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv) "
\n"; out.sync(); out.finalize(); - fmt::print("Password for default user is saved in file {}.\n", password_file); + fmt::print(HILITE "Password for default user is saved in file {}." END_HILITE "\n", password_file); #else out << "\n" " \n" @@ -611,12 +626,12 @@ int mainEntryClickHouseInstall(int argc, char ** argv) "\n"; out.sync(); out.finalize(); - fmt::print("Password for default user is saved in plaintext in file {}.\n", password_file); + fmt::print(HILITE "Password for default user is saved in plaintext in file {}." END_HILITE "\n", password_file); #endif has_password_for_default_user = true; } else - fmt::print("Password for default user is empty string. See {} and {} to change it.\n", + fmt::print(HILITE "Password for default user is empty string. See {} and {} to change it." END_HILITE "\n", users_config_file.string(), users_d.string()); } @@ -641,7 +656,6 @@ int mainEntryClickHouseInstall(int argc, char ** argv) " This is optional. Taskstats accounting will be disabled." " To enable taskstats accounting you may add the required capability later manually.\"", "/tmp/test_setcap.sh", fs::canonical(main_bin_path).string()); - fmt::print(" {}\n", command); executeScript(command); #endif @@ -830,8 +844,8 @@ namespace fmt::print("The pidof command returned unusual output.\n"); } - WriteBufferFromFileDescriptor stderr(STDERR_FILENO); - copyData(sh->err, stderr); + WriteBufferFromFileDescriptor std_err(STDERR_FILENO); + copyData(sh->err, std_err); sh->tryWait(); } @@ -842,6 +856,13 @@ namespace { fmt::print("The process with pid = {} is running.\n", pid); } + else if (errno == ESRCH) + { + fmt::print("The process with pid = {} does not exist.\n", pid); + return 0; + } + else + throwFromErrno(fmt::format("Cannot obtain the status of pid {} with `kill`", pid), ErrorCodes::CANNOT_KILL); } if (!pid) @@ -962,7 +983,7 @@ int mainEntryClickHouseStop(int argc, char ** argv) desc.add_options() ("help,h", "produce help message") ("pid-path", po::value()->default_value("/var/run/clickhouse-server"), "directory for pid file") - ("force", po::value()->default_value(false), "Stop with KILL signal instead of TERM") + ("force", po::bool_switch(), "Stop with KILL signal instead of TERM") ; po::variables_map options; diff --git a/programs/keeper/CMakeLists.txt b/programs/keeper/CMakeLists.txt new file mode 100644 index 00000000000..e604d0e304e --- /dev/null +++ b/programs/keeper/CMakeLists.txt @@ -0,0 +1,24 @@ +set(CLICKHOUSE_KEEPER_SOURCES + Keeper.cpp +) + +if (OS_LINUX) + set (LINK_RESOURCE_LIB INTERFACE "-Wl,${WHOLE_ARCHIVE} $ -Wl,${NO_WHOLE_ARCHIVE}") +endif () + +set (CLICKHOUSE_KEEPER_LINK + PRIVATE + clickhouse_common_config + clickhouse_common_io + clickhouse_common_zookeeper + daemon + dbms + + ${LINK_RESOURCE_LIB} +) + +clickhouse_program_add(keeper) + +install (FILES keeper_config.xml DESTINATION "${CLICKHOUSE_ETC_DIR}/clickhouse-keeper" COMPONENT clickhouse-keeper) + +clickhouse_embed_binaries(keeper keeper_config.xml keeper_embedded.xml) diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp new file mode 100644 index 00000000000..8b35ec12850 --- /dev/null +++ b/programs/keeper/Keeper.cpp @@ -0,0 +1,468 @@ +#include "Keeper.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if !defined(ARCADIA_BUILD) +# include "config_core.h" +# include "Common/config_version.h" +#endif + +#if USE_SSL +# include +# include +#endif + +#include + +#if 
defined(OS_LINUX) +# include +# include +#endif + + +int mainEntryClickHouseKeeper(int argc, char ** argv) +{ + DB::Keeper app; + + try + { + return app.run(argc, argv); + } + catch (...) + { + std::cerr << DB::getCurrentExceptionMessage(true) << "\n"; + auto code = DB::getCurrentExceptionCode(); + return code ? code : 1; + } +} + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NO_ELEMENTS_IN_CONFIG; + extern const int SUPPORT_IS_DISABLED; + extern const int NETWORK_ERROR; + extern const int MISMATCHING_USERS_FOR_PROCESS_AND_DATA; + extern const int FAILED_TO_GETPWUID; +} + +namespace +{ + +int waitServersToFinish(std::vector & servers, size_t seconds_to_wait) +{ + const int sleep_max_ms = 1000 * seconds_to_wait; + const int sleep_one_ms = 100; + int sleep_current_ms = 0; + int current_connections = 0; + for (;;) + { + current_connections = 0; + + for (auto & server : servers) + { + server.stop(); + current_connections += server.currentConnections(); + } + + if (!current_connections) + break; + + sleep_current_ms += sleep_one_ms; + if (sleep_current_ms < sleep_max_ms) + std::this_thread::sleep_for(std::chrono::milliseconds(sleep_one_ms)); + else + break; + } + return current_connections; +} + +Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port, Poco::Logger * log) +{ + Poco::Net::SocketAddress socket_address; + try + { + socket_address = Poco::Net::SocketAddress(host, port); + } + catch (const Poco::Net::DNSException & e) + { + const auto code = e.code(); + if (code == EAI_FAMILY +#if defined(EAI_ADDRFAMILY) + || code == EAI_ADDRFAMILY +#endif + ) + { + LOG_ERROR(log, "Cannot resolve listen_host ({}), error {}: {}. " + "If it is an IPv6 address and your host has disabled IPv6, then consider " + "specifying an IPv4 address to listen on in the listen_host element of the configuration " + "file. Example: 0.0.0.0", + host, e.code(), e.message()); + } + + throw; + } + return socket_address; +} + +[[noreturn]] void forceShutdown() +{ +#if defined(THREAD_SANITIZER) && defined(OS_LINUX) + /// Thread sanitizer tries to do something on exit that we don't need if we want to exit immediately, + /// while connection handling threads are still running. + (void)syscall(SYS_exit_group, 0); + __builtin_unreachable(); +#else + _exit(0); +#endif +} + +std::string getUserName(uid_t user_id) +{ + /// Try to convert user id into user name. 
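The body of this helper, which continues below, uses the re-entrant `getpwuid_r` with a scratch buffer sized via `sysconf(_SC_GETPW_R_SIZE_MAX)`. Reduced to its essentials, the POSIX pattern looks like this sketch (a standalone illustration, not the diff's code; it assumes a fixed fallback buffer size is acceptable when the sysconf limit is indeterminate):

```
#include <pwd.h>
#include <string>
#include <unistd.h>
#include <vector>

// getpwuid_r is the thread-safe variant of getpwuid: the caller supplies the
// scratch buffer that the returned passwd fields point into.
std::string userNameForUid(uid_t uid)
{
    long size = sysconf(_SC_GETPW_R_SIZE_MAX);
    if (size <= 0)
        size = 1024;                      // the limit may be indeterminate (-1)
    std::vector<char> buf(static_cast<size_t>(size));

    passwd entry{};
    passwd * result = nullptr;
    if (getpwuid_r(uid, &entry, buf.data(), buf.size(), &result) == 0 && result)
        return result->pw_name;
    return std::to_string(uid);           // fall back to the numeric id
}
```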
+ auto buffer_size = sysconf(_SC_GETPW_R_SIZE_MAX); + if (buffer_size <= 0) + buffer_size = 1024; + std::string buffer; + buffer.reserve(buffer_size); + + struct passwd passwd_entry; + struct passwd * result = nullptr; + const auto error = getpwuid_r(user_id, &passwd_entry, buffer.data(), buffer_size, &result); + + if (error) + throwFromErrno("Failed to find user name for " + toString(user_id), ErrorCodes::FAILED_TO_GETPWUID, error); + else if (result) + return result->pw_name; + return toString(user_id); +} + +} + +Poco::Net::SocketAddress Keeper::socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure) const +{ + auto address = makeSocketAddress(host, port, &logger()); +#if !defined(POCO_CLICKHOUSE_PATCH) || POCO_VERSION < 0x01090100 + if (secure) + /// Bug in old (<1.9.1) poco: listen() after bind() with the reusePort param will fail because it has no implementation in SecureServerSocketImpl + /// https://github.com/pocoproject/poco/pull/2257 + socket.bind(address, /* reuseAddress = */ true); + else +#endif +#if POCO_VERSION < 0x01080000 + socket.bind(address, /* reuseAddress = */ true); +#else + socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ config().getBool("listen_reuse_port", false)); +#endif + + socket.listen(/* backlog = */ config().getUInt("listen_backlog", 64)); + + return address; +} + +void Keeper::createServer(const std::string & listen_host, const char * port_name, bool listen_try, CreateServerFunc && func) const +{ + /// For testing purposes, the user may omit tcp_port, http_port or https_port in the configuration file. + if (!config().has(port_name)) + return; + + auto port = config().getInt(port_name); + try + { + func(port); + } + catch (const Poco::Exception &) + { + std::string message = "Listen [" + listen_host + "]:" + std::to_string(port) + " failed: " + getCurrentExceptionMessage(false); + + if (listen_try) + { + LOG_WARNING(&logger(), "{}. If it is an IPv6 or IPv4 address and your host has disabled IPv6 or IPv4, then consider " + "specifying an enabled IPv4 or IPv6 address to listen on in the listen_host element of the configuration " + "file. Example for disabled IPv6: 0.0.0.0 ." + " Example for disabled IPv4: ::", + message); + } + else + { + throw Exception{message, ErrorCodes::NETWORK_ERROR}; + } + } +} + +void Keeper::uninitialize() +{ + logger().information("shutting down"); + BaseDaemon::uninitialize(); +} + +int Keeper::run() +{ + if (config().hasOption("help")) + { + Poco::Util::HelpFormatter help_formatter(Keeper::options()); + auto header_str = fmt::format("{} [OPTION] [-- [ARG]...]\n" + "positional arguments can be used to rewrite config.xml properties, for example, --http_port=8010", + commandName()); + help_formatter.setHeader(header_str); + help_formatter.format(std::cout); + return 0; + } + if (config().hasOption("version")) + { + std::cout << DBMS_NAME << " keeper version " << VERSION_STRING << VERSION_OFFICIAL << "." 
<< std::endl; + return 0; + } + + return Application::run(); // NOLINT +} + +void Keeper::initialize(Poco::Util::Application & self) +{ + BaseDaemon::initialize(self); + logger().information("starting up"); + + LOG_INFO(&logger(), "OS Name = {}, OS Version = {}, OS Architecture = {}", + Poco::Environment::osName(), + Poco::Environment::osVersion(), + Poco::Environment::osArchitecture()); +} + +std::string Keeper::getDefaultConfigFileName() const +{ + return "keeper_config.xml"; +} + +void Keeper::defineOptions(Poco::Util::OptionSet & options) +{ + options.addOption( + Poco::Util::Option("help", "h", "show help and exit") + .required(false) + .repeatable(false) + .binding("help")); + options.addOption( + Poco::Util::Option("version", "V", "show version and exit") + .required(false) + .repeatable(false) + .binding("version")); + BaseDaemon::defineOptions(options); +} + +int Keeper::main(const std::vector & /*args*/) +{ + Poco::Logger * log = &logger(); + + UseSSL use_ssl; + + MainThreadStatus::getInstance(); + +#if !defined(NDEBUG) || !defined(__OPTIMIZE__) + LOG_WARNING(log, "Keeper was built in debug mode. It will work slowly."); +#endif + +#if defined(SANITIZER) + LOG_WARNING(log, "Keeper was built with sanitizer. It will work slowly."); +#endif + + auto shared_context = Context::createShared(); + global_context = Context::createGlobal(shared_context.get()); + + global_context->makeGlobalContext(); + global_context->setApplicationType(Context::ApplicationType::KEEPER); + + if (!config().has("keeper_server")) + throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "Keeper configuration (keeper_server section) not found in config"); + + + std::string path; + + if (config().has("keeper_server.storage_path")) + path = config().getString("keeper_server.storage_path"); + else if (config().has("keeper_server.log_storage_path")) + path = config().getString("keeper_server.log_storage_path"); + else if (config().has("keeper_server.snapshot_storage_path")) + path = config().getString("keeper_server.snapshot_storage_path"); + else + path = std::filesystem::path{KEEPER_DEFAULT_PATH}; + + + /// Check that the process user id matches the owner of the data. + const auto effective_user_id = geteuid(); + struct stat statbuf; + if (stat(path.c_str(), &statbuf) == 0 && effective_user_id != statbuf.st_uid) + { + const auto effective_user = getUserName(effective_user_id); + const auto data_owner = getUserName(statbuf.st_uid); + std::string message = "Effective user of the process (" + effective_user + + ") does not match the owner of the data (" + data_owner + ")."; + if (effective_user_id == 0) + { + message += " Run under 'sudo -u " + data_owner + "'."; + throw Exception(message, ErrorCodes::MISMATCHING_USERS_FOR_PROCESS_AND_DATA); + } + else + { + LOG_WARNING(log, message); + } + } + + const Settings & settings = global_context->getSettingsRef(); + + GlobalThreadPool::initialize(config().getUInt("max_thread_pool_size", 100)); + + static ServerErrorHandler error_handler; + Poco::ErrorHandler::set(&error_handler); + + /// Initialize DateLUT early, to not interfere with the running time of the first query. 
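A few hunks above, Keeper derives its working path from the first of several config keys that is present: `keeper_server.storage_path`, then `keeper_server.log_storage_path`, then `keeper_server.snapshot_storage_path`, then the compiled-in default. The same precedence chain, as a minimal sketch (the function is illustrative; it assumes a `Poco::Util::AbstractConfiguration`-style interface and the `KEEPER_DEFAULT_PATH` constant from the code above):

```
#include <string>
#include <Poco/Util/AbstractConfiguration.h>

// First-present-key-wins precedence chain for the Keeper storage path.
std::string resolveKeeperPath(const Poco::Util::AbstractConfiguration & config,
                              const std::string & default_path /* KEEPER_DEFAULT_PATH */)
{
    for (const auto * key : {"keeper_server.storage_path",
                             "keeper_server.log_storage_path",
                             "keeper_server.snapshot_storage_path"})
        if (config.has(key))
            return config.getString(key);
    return default_path;   // compiled-in default
}
```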
+ LOG_DEBUG(log, "Initializing DateLUT."); + DateLUT::instance(); + LOG_TRACE(log, "Initialized DateLUT with time zone '{}'.", DateLUT::instance().getTimeZone()); + + /// Don't want to use DNS cache + DNSResolver::instance().setDisableCacheFlag(); + + Poco::ThreadPool server_pool(3, config().getUInt("max_connections", 1024)); + + std::vector listen_hosts = DB::getMultipleValuesFromConfig(config(), "", "listen_host"); + + bool listen_try = config().getBool("listen_try", false); + if (listen_hosts.empty()) + { + listen_hosts.emplace_back("::1"); + listen_hosts.emplace_back("127.0.0.1"); + listen_try = true; + } + + auto servers = std::make_shared>(); + + /// Initialize test keeper RAFT. Do nothing if no nu_keeper_server in config. + global_context->initializeKeeperStorageDispatcher(); + for (const auto & listen_host : listen_hosts) + { + /// TCP Keeper + const char * port_name = "keeper_server.tcp_port"; + createServer(listen_host, port_name, listen_try, [&](UInt16 port) + { + Poco::Net::ServerSocket socket; + auto address = socketBindListen(socket, listen_host, port); + socket.setReceiveTimeout(settings.receive_timeout); + socket.setSendTimeout(settings.send_timeout); + servers->emplace_back( + port_name, + std::make_unique( + new KeeperTCPHandlerFactory(*this, false), server_pool, socket, new Poco::Net::TCPServerParams)); + + LOG_INFO(log, "Listening for connections to Keeper (tcp): {}", address.toString()); + }); + + const char * secure_port_name = "keeper_server.tcp_port_secure"; + createServer(listen_host, secure_port_name, listen_try, [&](UInt16 port) + { +#if USE_SSL + Poco::Net::SecureServerSocket socket; + auto address = socketBindListen(socket, listen_host, port, /* secure = */ true); + socket.setReceiveTimeout(settings.receive_timeout); + socket.setSendTimeout(settings.send_timeout); + servers->emplace_back( + secure_port_name, + std::make_unique( + new KeeperTCPHandlerFactory(*this, true), server_pool, socket, new Poco::Net::TCPServerParams)); + LOG_INFO(log, "Listening for connections to Keeper with secure protocol (tcp_secure): {}", address.toString()); +#else + UNUSED(port); + throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.", + ErrorCodes::SUPPORT_IS_DISABLED}; +#endif + }); + } + + for (auto & server : *servers) + server.start(); + + SCOPE_EXIT({ + LOG_INFO(log, "Shutting down."); + + global_context->shutdown(); + + LOG_DEBUG(log, "Waiting for current connections to Keeper to finish."); + int current_connections = 0; + for (auto & server : *servers) + { + server.stop(); + current_connections += server.currentConnections(); + } + + if (current_connections) + LOG_INFO(log, "Closed all listening sockets. Waiting for {} outstanding connections.", current_connections); + else + LOG_INFO(log, "Closed all listening sockets."); + + if (current_connections > 0) + current_connections = waitServersToFinish(*servers, config().getInt("shutdown_wait_unfinished", 5)); + + if (current_connections) + LOG_INFO(log, "Closed connections to Keeper. But {} remain. Probably some users cannot finish their connections after context shutdown.", current_connections); + else + LOG_INFO(log, "Closed connections to Keeper."); + + global_context->shutdownKeeperStorageDispatcher(); + + /// Wait server pool to avoid use-after-free of destroyed context in the handlers + server_pool.joinAll(); + + /** Explicitly destroy Context. It is more convenient than in destructor of Server, because logger is still available. 
+ * At this moment, no one could own shared part of Context. + */ + global_context.reset(); + shared_context.reset(); + + LOG_DEBUG(log, "Destroyed global context."); + + if (current_connections) + { + LOG_INFO(log, "Will shutdown forcefully."); + forceShutdown(); + } + }); + + + buildLoggers(config(), logger()); + + LOG_INFO(log, "Ready for connections."); + + waitForTerminationRequest(); + + return Application::EXIT_OK; +} + + +void Keeper::logRevision() const +{ + Poco::Logger::root().information("Starting ClickHouse Keeper " + std::string{VERSION_STRING} + + " with revision " + std::to_string(ClickHouseRevision::getVersionRevision()) + + ", " + build_id_info + + ", PID " + std::to_string(getpid())); +} + + +} diff --git a/programs/keeper/Keeper.h b/programs/keeper/Keeper.h new file mode 100644 index 00000000000..f5b97dacf7d --- /dev/null +++ b/programs/keeper/Keeper.h @@ -0,0 +1,69 @@ +#pragma once + +#include +#include + +namespace Poco +{ + namespace Net + { + class ServerSocket; + } +} + +namespace DB +{ + +/// standalone clickhouse-keeper server (replacement for ZooKeeper). Uses the same +/// config as clickhouse-server. Serves requests on TCP ports with or without +/// SSL using ZooKeeper protocol. +class Keeper : public BaseDaemon, public IServer +{ +public: + using ServerApplication::run; + + Poco::Util::LayeredConfiguration & config() const override + { + return BaseDaemon::config(); + } + + Poco::Logger & logger() const override + { + return BaseDaemon::logger(); + } + + ContextMutablePtr context() const override + { + return global_context; + } + + bool isCancelled() const override + { + return BaseDaemon::isCancelled(); + } + + void defineOptions(Poco::Util::OptionSet & _options) override; + +protected: + void logRevision() const override; + + int run() override; + + void initialize(Application & self) override; + + void uninitialize() override; + + int main(const std::vector & args) override; + + std::string getDefaultConfigFileName() const override; + +private: + ContextMutablePtr global_context; + + Poco::Net::SocketAddress socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure = false) const; + + using CreateServerFunc = std::function; + void createServer(const std::string & listen_host, const char * port_name, bool listen_try, CreateServerFunc && func) const; +}; + +} diff --git a/programs/keeper/clickhouse-keeper.cpp b/programs/keeper/clickhouse-keeper.cpp new file mode 100644 index 00000000000..baa673f79ee --- /dev/null +++ b/programs/keeper/clickhouse-keeper.cpp @@ -0,0 +1,6 @@ +int mainEntryClickHouseKeeper(int argc, char ** argv); + +int main(int argc_, char ** argv_) +{ + return mainEntryClickHouseKeeper(argc_, argv_); +} diff --git a/programs/keeper/keeper_config.xml b/programs/keeper/keeper_config.xml new file mode 100644 index 00000000000..ef218c9f2d7 --- /dev/null +++ b/programs/keeper/keeper_config.xml @@ -0,0 +1,81 @@ + + + + trace + /var/log/clickhouse-keeper/clickhouse-keeper.log + /var/log/clickhouse-keeper/clickhouse-keeper.err.log + + 1000M + 10 + + + + 4096 + + + 9181 + + + 1 + + /var/lib/clickhouse/coordination/logs + /var/lib/clickhouse/coordination/snapshots + + + 10000 + 30000 + information + + + + + + 1 + + + localhost + 44444 + + + + + + + + + + + + + /etc/clickhouse-keeper/server.crt + /etc/clickhouse-keeper/server.key + + /etc/clickhouse-keeper/dhparam.pem + none + true + true + sslv2,sslv3 + true + + + + diff --git a/programs/keeper/keeper_embedded.xml 
b/programs/keeper/keeper_embedded.xml new file mode 100644 index 00000000000..37edaedba80 --- /dev/null +++ b/programs/keeper/keeper_embedded.xml @@ -0,0 +1,21 @@ + + + trace + true + + + + 9181 + 1 + ./keeper_log + ./keeper_snapshot + + + + 1 + localhost + 44444 + + + + diff --git a/programs/library-bridge/CMakeLists.txt b/programs/library-bridge/CMakeLists.txt new file mode 100644 index 00000000000..0913c6e4a9a --- /dev/null +++ b/programs/library-bridge/CMakeLists.txt @@ -0,0 +1,25 @@ +set (CLICKHOUSE_LIBRARY_BRIDGE_SOURCES + library-bridge.cpp + LibraryInterface.cpp + LibraryBridge.cpp + Handlers.cpp + HandlerFactory.cpp + SharedLibraryHandler.cpp + SharedLibraryHandlerFactory.cpp +) + +if (OS_LINUX) + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-export-dynamic") +endif () + +add_executable(clickhouse-library-bridge ${CLICKHOUSE_LIBRARY_BRIDGE_SOURCES}) + +target_link_libraries(clickhouse-library-bridge PRIVATE + daemon + dbms + bridge +) + +set_target_properties(clickhouse-library-bridge PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..) + +install(TARGETS clickhouse-library-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) diff --git a/programs/library-bridge/HandlerFactory.cpp b/programs/library-bridge/HandlerFactory.cpp new file mode 100644 index 00000000000..9f53a24156f --- /dev/null +++ b/programs/library-bridge/HandlerFactory.cpp @@ -0,0 +1,23 @@ +#include "HandlerFactory.h" + +#include +#include +#include "Handlers.h" + + +namespace DB +{ + std::unique_ptr LibraryBridgeHandlerFactory::createRequestHandler(const HTTPServerRequest & request) + { + Poco::URI uri{request.getURI()}; + LOG_DEBUG(log, "Request URI: {}", uri.toString()); + + if (uri == "/ping" && request.getMethod() == Poco::Net::HTTPRequest::HTTP_GET) + return std::make_unique(keep_alive_timeout); + + if (request.getMethod() == Poco::Net::HTTPRequest::HTTP_POST) + return std::make_unique(keep_alive_timeout, getContext()); + + return nullptr; + } +} diff --git a/programs/library-bridge/HandlerFactory.h b/programs/library-bridge/HandlerFactory.h new file mode 100644 index 00000000000..93f0721bf01 --- /dev/null +++ b/programs/library-bridge/HandlerFactory.h @@ -0,0 +1,37 @@ +#pragma once + +#include +#include +#include + + +namespace DB +{ + +class SharedLibraryHandler; +using SharedLibraryHandlerPtr = std::shared_ptr; + +/// Factory for '/ping', '/' handlers. 
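The factory declared just below routes exactly two kinds of requests, as shown in HandlerFactory.cpp above: `GET /ping` goes to a `PingHandler` (which answers `Ok.\n`), and any `POST` goes to a `LibraryRequestHandler`. A liveness probe against a running bridge could look like this sketch, using the Poco HTTP client already used throughout this codebase (the host and the port 9012 are assumptions for illustration, not values confirmed by this diff):

```
#include <iostream>
#include <Poco/Net/HTTPClientSession.h>
#include <Poco/Net/HTTPRequest.h>
#include <Poco/Net/HTTPResponse.h>
#include <Poco/StreamCopier.h>

// Probe the bridge's /ping endpoint; PingHandler responds with "Ok.\n".
int main()
{
    Poco::Net::HTTPClientSession session("127.0.0.1", 9012);  // assumed host/port
    Poco::Net::HTTPRequest request(Poco::Net::HTTPRequest::HTTP_GET, "/ping");
    session.sendRequest(request);

    Poco::Net::HTTPResponse response;
    std::istream & body = session.receiveResponse(response);
    Poco::StreamCopier::copyStream(body, std::cout);          // expected: "Ok.\n"
    return response.getStatus() == Poco::Net::HTTPResponse::HTTP_OK ? 0 : 1;
}
```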
+class LibraryBridgeHandlerFactory : public HTTPRequestHandlerFactory, WithContext +{ +public: + LibraryBridgeHandlerFactory( + const std::string & name_, + size_t keep_alive_timeout_, + ContextPtr context_) + : WithContext(context_) + , log(&Poco::Logger::get(name_)) + , name(name_) + , keep_alive_timeout(keep_alive_timeout_) + { + } + + std::unique_ptr createRequestHandler(const HTTPServerRequest & request) override; + +private: + Poco::Logger * log; + std::string name; + size_t keep_alive_timeout; +}; + +} diff --git a/programs/library-bridge/Handlers.cpp b/programs/library-bridge/Handlers.cpp new file mode 100644 index 00000000000..6a1bfbbccb7 --- /dev/null +++ b/programs/library-bridge/Handlers.cpp @@ -0,0 +1,288 @@ +#include "Handlers.h" +#include "SharedLibraryHandlerFactory.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ +namespace +{ + std::shared_ptr parseColumns(std::string && column_string) + { + auto sample_block = std::make_shared(); + auto names_and_types = NamesAndTypesList::parse(column_string); + + for (const NameAndTypePair & column_data : names_and_types) + sample_block->insert({column_data.type, column_data.name}); + + return sample_block; + } + + std::vector parseIdsFromBinary(const std::string & ids_string) + { + ReadBufferFromString buf(ids_string); + std::vector ids; + readVectorBinary(ids, buf); + return ids; + } + + std::vector parseNamesFromBinary(const std::string & names_string) + { + ReadBufferFromString buf(names_string); + std::vector names; + readVectorBinary(names, buf); + return names; + } +} + + +void LibraryRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) +{ + LOG_TRACE(log, "Request URI: {}", request.getURI()); + HTMLForm params(request); + + if (!params.has("method")) + { + processError(response, "No 'method' in request URL"); + return; + } + + if (!params.has("dictionary_id")) + { + processError(response, "No 'dictionary_id' in request URL"); + return; + } + + std::string method = params.get("method"); + std::string dictionary_id = params.get("dictionary_id"); + LOG_TRACE(log, "Library method: '{}', dictionary id: {}", method, dictionary_id); + + WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout); + + try + { + if (method == "libNew") + { + auto & read_buf = request.getStream(); + params.read(read_buf); + + if (!params.has("library_path")) + { + processError(response, "No 'library_path' in request URL"); + return; + } + + if (!params.has("library_settings")) + { + processError(response, "No 'library_settings' in request URL"); + return; + } + + std::string library_path = params.get("library_path"); + const auto & settings_string = params.get("library_settings"); + std::vector library_settings = parseNamesFromBinary(settings_string); + + /// Needed for library dictionary + if (!params.has("attributes_names")) + { + processError(response, "No 'attributes_names' in request URL"); + return; + } + + const auto & attributes_string = params.get("attributes_names"); + std::vector attributes_names = parseNamesFromBinary(attributes_string); + + /// Needed to parse block from binary string format + if (!params.has("sample_block")) + { + processError(response, "No 'sample_block' in request URL"); + return; + } + std::string sample_block_string = params.get("sample_block"); + + std::shared_ptr sample_block; + try + { + sample_block = 
parseColumns(std::move(sample_block_string)); + } + catch (const Exception & ex) + { + processError(response, "Invalid 'sample_block' parameter in request body '" + ex.message() + "'"); + LOG_WARNING(log, ex.getStackTraceString()); + return; + } + + if (!params.has("null_values")) + { + processError(response, "No 'null_values' in request URL"); + return; + } + + ReadBufferFromString read_block_buf(params.get("null_values")); + auto format = FormatFactory::instance().getInput(FORMAT, read_block_buf, *sample_block, getContext(), DEFAULT_BLOCK_SIZE); + auto reader = std::make_shared(format); + auto sample_block_with_nulls = reader->read(); + + LOG_DEBUG(log, "Dictionary sample block with null values: {}", sample_block_with_nulls.dumpStructure()); + + SharedLibraryHandlerFactory::instance().create(dictionary_id, library_path, library_settings, sample_block_with_nulls, attributes_names); + writeStringBinary("1", out); + } + else if (method == "libClone") + { + if (!params.has("from_dictionary_id")) + { + processError(response, "No 'from_dictionary_id' in request URL"); + return; + } + + std::string from_dictionary_id = params.get("from_dictionary_id"); + LOG_TRACE(log, "Calling libClone from {} to {}", from_dictionary_id, dictionary_id); + SharedLibraryHandlerFactory::instance().clone(from_dictionary_id, dictionary_id); + writeStringBinary("1", out); + } + else if (method == "libDelete") + { + SharedLibraryHandlerFactory::instance().remove(dictionary_id); + writeStringBinary("1", out); + } + else if (method == "isModified") + { + auto library_handler = SharedLibraryHandlerFactory::instance().get(dictionary_id); + bool res = library_handler->isModified(); + writeStringBinary(std::to_string(res), out); + } + else if (method == "supportsSelectiveLoad") + { + auto library_handler = SharedLibraryHandlerFactory::instance().get(dictionary_id); + bool res = library_handler->supportsSelectiveLoad(); + writeStringBinary(std::to_string(res), out); + } + else if (method == "loadAll") + { + auto library_handler = SharedLibraryHandlerFactory::instance().get(dictionary_id); + const auto & sample_block = library_handler->getSampleBlock(); + auto input = library_handler->loadAll(); + + BlockOutputStreamPtr output = FormatFactory::instance().getOutputStream(FORMAT, out, sample_block, getContext()); + copyData(*input, *output); + } + else if (method == "loadIds") + { + params.read(request.getStream()); + + if (!params.has("ids")) + { + processError(response, "No 'ids' in request URL"); + return; + } + + std::vector ids = parseIdsFromBinary(params.get("ids")); + auto library_handler = SharedLibraryHandlerFactory::instance().get(dictionary_id); + const auto & sample_block = library_handler->getSampleBlock(); + auto input = library_handler->loadIds(ids); + BlockOutputStreamPtr output = FormatFactory::instance().getOutputStream(FORMAT, out, sample_block, getContext()); + copyData(*input, *output); + } + else if (method == "loadKeys") + { + if (!params.has("requested_block_sample")) + { + processError(response, "No 'requested_block_sample' in request URL"); + return; + } + + std::string requested_block_string = params.get("requested_block_sample"); + + std::shared_ptr requested_sample_block; + try + { + requested_sample_block = parseColumns(std::move(requested_block_string)); + } + catch (const Exception & ex) + { + processError(response, "Invalid 'requested_block' parameter in request body '" + ex.message() + "'"); + LOG_WARNING(log, ex.getStackTraceString()); + return; + } + + auto & read_buf = request.getStream(); 
+ auto format = FormatFactory::instance().getInput(FORMAT, read_buf, *requested_sample_block, getContext(), DEFAULT_BLOCK_SIZE); + auto reader = std::make_shared(format); + auto block = reader->read(); + + auto library_handler = SharedLibraryHandlerFactory::instance().get(dictionary_id); + const auto & sample_block = library_handler->getSampleBlock(); + auto input = library_handler->loadKeys(block.getColumns()); + BlockOutputStreamPtr output = FormatFactory::instance().getOutputStream(FORMAT, out, sample_block, getContext()); + copyData(*input, *output); + } + } + catch (...) + { + auto message = getCurrentExceptionMessage(true); + response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR, message); // can't call processError, because the response may already have started sending + + try + { + writeStringBinary(message, out); + out.finalize(); + } + catch (...) + { + tryLogCurrentException(log); + } + + tryLogCurrentException(log); + } + + try + { + out.finalize(); + } + catch (...) + { + tryLogCurrentException(log); + } +} + + +void LibraryRequestHandler::processError(HTTPServerResponse & response, const std::string & message) +{ + response.setStatusAndReason(HTTPResponse::HTTP_INTERNAL_SERVER_ERROR); + + if (!response.sent()) + *response.send() << message << std::endl; + + LOG_WARNING(log, message); +} + + +void PingHandler::handleRequest(HTTPServerRequest & /* request */, HTTPServerResponse & response) +{ + try + { + setResponseDefaultHeaders(response, keep_alive_timeout); + const char * data = "Ok.\n"; + response.sendBuffer(data, strlen(data)); + } + catch (...) + { + tryLogCurrentException("PingHandler"); + } +} + + +} diff --git a/programs/library-bridge/Handlers.h b/programs/library-bridge/Handlers.h new file mode 100644 index 00000000000..dac61d3a735 --- /dev/null +++ b/programs/library-bridge/Handlers.h @@ -0,0 +1,59 @@ +#pragma once + +#include +#include +#include +#include "SharedLibraryHandler.h" + + +namespace DB +{ + + +/// Handler for requests to Library Dictionary Source, returns response in RowBinary format. +/// When a library dictionary source is created, it sends a libNew request to the library bridge (which is started on the first +/// request to it, if it was not yet started). On this request a new SharedLibraryHandler is added to the +/// SharedLibraryHandlerFactory, keyed by the dictionary UUID. The libNew request carries: library_path, library_settings, +/// the names of the dictionary attributes, a sample block to parse the block of null values, and the block of null values. Everything is +/// passed in binary format and is urlencoded. When a dictionary is cloned, a new handler is created. +/// Each handler is unique to a dictionary. 
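As the comment above says, parameters such as `ids` and `library_settings` are passed as binary-serialized vectors. The round-trip, using the same `readVectorBinary`/`writeVectorBinary` helpers that `parseIdsFromBinary` in Handlers.cpp relies on, can be sketched like this (a standalone illustration, not part of the bridge; the encoding is a varint length followed by the elements):

```
#include <string>
#include <vector>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteBufferFromString.h>
#include <IO/WriteHelpers.h>

using namespace DB;

// Encode a vector of ids the way the bridge expects the 'ids' parameter.
std::string encodeIds(const std::vector<UInt64> & ids)
{
    WriteBufferFromOwnString out;
    writeVectorBinary(ids, out);   // varint size, then the elements
    return out.str();
}

// Decode it back, mirroring parseIdsFromBinary in Handlers.cpp.
std::vector<UInt64> decodeIds(const std::string & payload)
{
    ReadBufferFromString in(payload);
    std::vector<UInt64> ids;
    readVectorBinary(ids, in);
    return ids;
}
```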
+class LibraryRequestHandler : public HTTPRequestHandler, WithContext +{ +public: + + LibraryRequestHandler( + size_t keep_alive_timeout_, + ContextPtr context_) + : WithContext(context_) + , log(&Poco::Logger::get("LibraryRequestHandler")) + , keep_alive_timeout(keep_alive_timeout_) + { + } + + void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; + +private: + static constexpr inline auto FORMAT = "RowBinary"; + + void processError(HTTPServerResponse & response, const std::string & message); + + Poco::Logger * log; + size_t keep_alive_timeout; +}; + + +class PingHandler : public HTTPRequestHandler +{ +public: + explicit PingHandler(size_t keep_alive_timeout_) + : keep_alive_timeout(keep_alive_timeout_) + { + } + + void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; + +private: + const size_t keep_alive_timeout; +}; + +} diff --git a/programs/library-bridge/LibraryBridge.cpp b/programs/library-bridge/LibraryBridge.cpp new file mode 100644 index 00000000000..2e5d6041151 --- /dev/null +++ b/programs/library-bridge/LibraryBridge.cpp @@ -0,0 +1,17 @@ +#include "LibraryBridge.h" + +#pragma GCC diagnostic ignored "-Wmissing-declarations" +int mainEntryClickHouseLibraryBridge(int argc, char ** argv) +{ + DB::LibraryBridge app; + try + { + return app.run(argc, argv); + } + catch (...) + { + std::cerr << DB::getCurrentExceptionMessage(true) << "\n"; + auto code = DB::getCurrentExceptionCode(); + return code ? code : 1; + } +} diff --git a/programs/library-bridge/LibraryBridge.h b/programs/library-bridge/LibraryBridge.h new file mode 100644 index 00000000000..9f2dafb89ab --- /dev/null +++ b/programs/library-bridge/LibraryBridge.h @@ -0,0 +1,26 @@ +#pragma once + +#include +#include +#include "HandlerFactory.h" + + +namespace DB +{ + +class LibraryBridge : public IBridge +{ + +protected: + std::string bridgeName() const override + { + return "LibraryBridge"; + } + + HandlerFactoryPtr getHandlerFactoryPtr(ContextPtr context) const override + { + return std::make_shared("LibraryRequestHandlerFactory-factory", keep_alive_timeout, context); + } +}; + +} diff --git a/src/Dictionaries/LibraryDictionarySourceExternal.cpp b/programs/library-bridge/LibraryInterface.cpp similarity index 97% rename from src/Dictionaries/LibraryDictionarySourceExternal.cpp rename to programs/library-bridge/LibraryInterface.cpp index 259d0a2846a..3975368c17f 100644 --- a/src/Dictionaries/LibraryDictionarySourceExternal.cpp +++ b/programs/library-bridge/LibraryInterface.cpp @@ -1,4 +1,5 @@ -#include "LibraryDictionarySourceExternal.h" +#include "LibraryInterface.h" + #include namespace diff --git a/src/Dictionaries/LibraryDictionarySourceExternal.h b/programs/library-bridge/LibraryInterface.h similarity index 97% rename from src/Dictionaries/LibraryDictionarySourceExternal.h rename to programs/library-bridge/LibraryInterface.h index 3b92707d091..d23de59bbb1 100644 --- a/src/Dictionaries/LibraryDictionarySourceExternal.h +++ b/programs/library-bridge/LibraryInterface.h @@ -101,7 +101,7 @@ using RequestedIds = const VectorUInt64 *; using LibraryLoadIdsFunc = RawClickHouseLibraryTable (*)(LibraryData, LibrarySettings, RequestedColumnsNames, RequestedIds); using RequestedKeys = Table *; -/// There is no requested columns names for load keys func +/// There are no requested column names for load keys func using LibraryLoadKeysFunc = RawClickHouseLibraryTable (*)(LibraryData, LibrarySettings, RequestedKeys); using LibraryIsModifiedFunc = bool (*)(LibraryContext, 
LibrarySettings); diff --git a/programs/library-bridge/LibraryUtils.h b/programs/library-bridge/LibraryUtils.h new file mode 100644 index 00000000000..8ced8df1c48 --- /dev/null +++ b/programs/library-bridge/LibraryUtils.h @@ -0,0 +1,44 @@ +#pragma once + +#include +#include +#include +#include + +#include "LibraryInterface.h" + + +namespace DB +{ + +class CStringsHolder +{ + +public: + using Container = std::vector; + + explicit CStringsHolder(const Container & strings_pass) + { + strings_holder = strings_pass; + strings.size = strings_holder.size(); + + ptr_holder = std::make_unique(strings.size); + strings.data = ptr_holder.get(); + + size_t i = 0; + for (auto & str : strings_holder) + { + strings.data[i] = str.c_str(); + ++i; + } + } + + ClickHouseLibrary::CStrings strings; // will pass pointer to lib + +private: + std::unique_ptr ptr_holder = nullptr; + Container strings_holder; +}; + + +} diff --git a/programs/library-bridge/SharedLibraryHandler.cpp b/programs/library-bridge/SharedLibraryHandler.cpp new file mode 100644 index 00000000000..ab8cf2417c2 --- /dev/null +++ b/programs/library-bridge/SharedLibraryHandler.cpp @@ -0,0 +1,219 @@ +#include "SharedLibraryHandler.h" + +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int EXTERNAL_LIBRARY_ERROR; + extern const int SIZES_OF_COLUMNS_DOESNT_MATCH; +} + + +SharedLibraryHandler::SharedLibraryHandler( + const std::string & library_path_, + const std::vector & library_settings, + const Block & sample_block_, + const std::vector & attributes_names_) + : library_path(library_path_) + , sample_block(sample_block_) + , attributes_names(attributes_names_) +{ + library = std::make_shared(library_path, RTLD_LAZY); + settings_holder = std::make_shared(CStringsHolder(library_settings)); + + auto lib_new = library->tryGet(ClickHouseLibrary::LIBRARY_CREATE_NEW_FUNC_NAME); + + if (lib_new) + lib_data = lib_new(&settings_holder->strings, ClickHouseLibrary::log); + else + throw Exception("Method libNew failed", ErrorCodes::EXTERNAL_LIBRARY_ERROR); +} + + +SharedLibraryHandler::SharedLibraryHandler(const SharedLibraryHandler & other) + : library_path{other.library_path} + , sample_block{other.sample_block} + , attributes_names{other.attributes_names} + , library{other.library} + , settings_holder{other.settings_holder} +{ + + auto lib_clone = library->tryGet(ClickHouseLibrary::LIBRARY_CLONE_FUNC_NAME); + + if (lib_clone) + { + lib_data = lib_clone(other.lib_data); + } + else + { + auto lib_new = library->tryGet(ClickHouseLibrary::LIBRARY_CREATE_NEW_FUNC_NAME); + + if (lib_new) + lib_data = lib_new(&settings_holder->strings, ClickHouseLibrary::log); + } +} + + +SharedLibraryHandler::~SharedLibraryHandler() +{ + auto lib_delete = library->tryGet(ClickHouseLibrary::LIBRARY_DELETE_FUNC_NAME); + + if (lib_delete) + lib_delete(lib_data); +} + + +bool SharedLibraryHandler::isModified() +{ + auto func_is_modified = library->tryGet(ClickHouseLibrary::LIBRARY_IS_MODIFIED_FUNC_NAME); + + if (func_is_modified) + return func_is_modified(lib_data, &settings_holder->strings); + + return true; +} + + +bool SharedLibraryHandler::supportsSelectiveLoad() +{ + auto func_supports_selective_load = library->tryGet(ClickHouseLibrary::LIBRARY_SUPPORTS_SELECTIVE_LOAD_FUNC_NAME); + + if (func_supports_selective_load) + return func_supports_selective_load(lib_data, &settings_holder->strings); + + return true; +} + + +BlockInputStreamPtr SharedLibraryHandler::loadAll() +{ + auto columns_holder = std::make_unique(attributes_names.size()); + 
ClickHouseLibrary::CStrings columns{static_cast(columns_holder.get()), attributes_names.size()}; + for (size_t i = 0; i < attributes_names.size(); ++i) + columns.data[i] = attributes_names[i].c_str(); + + auto load_all_func = library->get(ClickHouseLibrary::LIBRARY_LOAD_ALL_FUNC_NAME); + auto data_new_func = library->get(ClickHouseLibrary::LIBRARY_DATA_NEW_FUNC_NAME); + auto data_delete_func = library->get(ClickHouseLibrary::LIBRARY_DATA_DELETE_FUNC_NAME); + + ClickHouseLibrary::LibraryData data_ptr = data_new_func(lib_data); + SCOPE_EXIT(data_delete_func(lib_data, data_ptr)); + + ClickHouseLibrary::RawClickHouseLibraryTable data = load_all_func(data_ptr, &settings_holder->strings, &columns); + auto block = dataToBlock(data); + + return std::make_shared(block); +} + + +BlockInputStreamPtr SharedLibraryHandler::loadIds(const std::vector & ids) +{ + const ClickHouseLibrary::VectorUInt64 ids_data{ext::bit_cast(ids.data()), ids.size()}; + + auto columns_holder = std::make_unique(attributes_names.size()); + ClickHouseLibrary::CStrings columns_pass{static_cast(columns_holder.get()), attributes_names.size()}; + + auto load_ids_func = library->get(ClickHouseLibrary::LIBRARY_LOAD_IDS_FUNC_NAME); + auto data_new_func = library->get(ClickHouseLibrary::LIBRARY_DATA_NEW_FUNC_NAME); + auto data_delete_func = library->get(ClickHouseLibrary::LIBRARY_DATA_DELETE_FUNC_NAME); + + ClickHouseLibrary::LibraryData data_ptr = data_new_func(lib_data); + SCOPE_EXIT(data_delete_func(lib_data, data_ptr)); + + ClickHouseLibrary::RawClickHouseLibraryTable data = load_ids_func(data_ptr, &settings_holder->strings, &columns_pass, &ids_data); + auto block = dataToBlock(data); + + return std::make_shared(block); +} + + +BlockInputStreamPtr SharedLibraryHandler::loadKeys(const Columns & key_columns) +{ + auto holder = std::make_unique(key_columns.size()); + std::vector> column_data_holders; + + for (size_t i = 0; i < key_columns.size(); ++i) + { + auto cell_holder = std::make_unique(key_columns[i]->size()); + + for (size_t j = 0; j < key_columns[i]->size(); ++j) + { + auto data_ref = key_columns[i]->getDataAt(j); + + cell_holder[j] = ClickHouseLibrary::Field{ + .data = static_cast(data_ref.data), + .size = data_ref.size}; + } + + holder[i] = ClickHouseLibrary::Row{ + .data = static_cast(cell_holder.get()), + .size = key_columns[i]->size()}; + + column_data_holders.push_back(std::move(cell_holder)); + } + + ClickHouseLibrary::Table request_cols{ + .data = static_cast(holder.get()), + .size = key_columns.size()}; + + auto load_keys_func = library->get(ClickHouseLibrary::LIBRARY_LOAD_KEYS_FUNC_NAME); + auto data_new_func = library->get(ClickHouseLibrary::LIBRARY_DATA_NEW_FUNC_NAME); + auto data_delete_func = library->get(ClickHouseLibrary::LIBRARY_DATA_DELETE_FUNC_NAME); + + ClickHouseLibrary::LibraryData data_ptr = data_new_func(lib_data); + SCOPE_EXIT(data_delete_func(lib_data, data_ptr)); + + ClickHouseLibrary::RawClickHouseLibraryTable data = load_keys_func(data_ptr, &settings_holder->strings, &request_cols); + auto block = dataToBlock(data); + + return std::make_shared(block); +} + + +Block SharedLibraryHandler::dataToBlock(const ClickHouseLibrary::RawClickHouseLibraryTable data) +{ + if (!data) + throw Exception("LibraryDictionarySource: No data returned", ErrorCodes::EXTERNAL_LIBRARY_ERROR); + + const auto * columns_received = static_cast(data); + if (columns_received->error_code) + throw Exception( + "LibraryDictionarySource: Returned error: " + std::to_string(columns_received->error_code) + " " + 
(columns_received->error_string ? columns_received->error_string : ""), + ErrorCodes::EXTERNAL_LIBRARY_ERROR); + + MutableColumns columns = sample_block.cloneEmptyColumns(); + + for (size_t col_n = 0; col_n < columns_received->size; ++col_n) + { + if (columns.size() != columns_received->data[col_n].size) + throw Exception( + "LibraryDictionarySource: Returned unexpected number of columns: " + std::to_string(columns_received->data[col_n].size) + ", must be " + std::to_string(columns.size()), + ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH); + + for (size_t row_n = 0; row_n < columns_received->data[col_n].size; ++row_n) + { + const auto & field = columns_received->data[col_n].data[row_n]; + if (!field.data) + { + /// sample_block contains null_value (from config) inside corresponding column + const auto & col = sample_block.getByPosition(row_n); + columns[row_n]->insertFrom(*(col.column), 0); + } + else + { + const auto & size = field.size; + columns[row_n]->insertData(static_cast(field.data), size); + } + } + } + + return sample_block.cloneWithColumns(std::move(columns)); +} + +} diff --git a/programs/library-bridge/SharedLibraryHandler.h b/programs/library-bridge/SharedLibraryHandler.h new file mode 100644 index 00000000000..fa476995e32 --- /dev/null +++ b/programs/library-bridge/SharedLibraryHandler.h @@ -0,0 +1,56 @@ +#pragma once + +#include +#include +#include +#include "LibraryUtils.h" + + +namespace DB +{ + +/// A class that manages all operations with library dictionary. +/// Every library dictionary source has its own object of this class, accessed by UUID. +class SharedLibraryHandler +{ + +public: + SharedLibraryHandler( + const std::string & library_path_, + const std::vector & library_settings, + const Block & sample_block_, + const std::vector & attributes_names_); + + SharedLibraryHandler(const SharedLibraryHandler & other); + + SharedLibraryHandler & operator=(const SharedLibraryHandler & other) = delete; + + ~SharedLibraryHandler(); + + BlockInputStreamPtr loadAll(); + + BlockInputStreamPtr loadIds(const std::vector & ids); + + BlockInputStreamPtr loadKeys(const Columns & key_columns); + + bool isModified(); + + bool supportsSelectiveLoad(); + + const Block & getSampleBlock() { return sample_block; } + +private: + Block dataToBlock(const ClickHouseLibrary::RawClickHouseLibraryTable data); + + std::string library_path; + const Block sample_block; + std::vector attributes_names; + + SharedLibraryPtr library; + std::shared_ptr settings_holder; + void * lib_data; +}; + +using SharedLibraryHandlerPtr = std::shared_ptr; + +} diff --git a/programs/library-bridge/SharedLibraryHandlerFactory.cpp b/programs/library-bridge/SharedLibraryHandlerFactory.cpp new file mode 100644 index 00000000000..05494c313c4 --- /dev/null +++ b/programs/library-bridge/SharedLibraryHandlerFactory.cpp @@ -0,0 +1,67 @@ +#include "SharedLibraryHandlerFactory.h" + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +SharedLibraryHandlerPtr SharedLibraryHandlerFactory::get(const std::string & dictionary_id) +{ + std::lock_guard lock(mutex); + auto library_handler = library_handlers.find(dictionary_id); + + if (library_handler != library_handlers.end()) + return library_handler->second; + + return nullptr; +} + + +void SharedLibraryHandlerFactory::create( + const std::string & dictionary_id, + const std::string & library_path, + const std::vector & library_settings, + const Block & sample_block, + const std::vector & attributes_names) +{ + std::lock_guard lock(mutex); + 
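/// Insert-or-replace under the lock: a handler previously registered for this dictionary_id + /// is overwritten here and released once its last user is done with it. +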
library_handlers[dictionary_id] = std::make_shared(library_path, library_settings, sample_block, attributes_names); +} + + +void SharedLibraryHandlerFactory::clone(const std::string & from_dictionary_id, const std::string & to_dictionary_id) +{ + std::lock_guard lock(mutex); + auto from_library_handler = library_handlers.find(from_dictionary_id); + + /// This is not supposed to happen, as libClone is called from the copy constructor of the LibraryDictionarySource + /// object, and the shared library handler of from_dictionary is removed only in its destructor. + /// And if from_dictionary had no shared library handler, it would have received an exception in + /// its constructor, so no libClone call would be made from it. + if (from_library_handler == library_handlers.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "No shared library handler found"); + + /// The libClone method will be called in the copy constructor. + library_handlers[to_dictionary_id] = std::make_shared(*from_library_handler->second); +} + + +void SharedLibraryHandlerFactory::remove(const std::string & dictionary_id) +{ + std::lock_guard lock(mutex); + /// libDelete is called in the destructor. + library_handlers.erase(dictionary_id); +} + + +SharedLibraryHandlerFactory & SharedLibraryHandlerFactory::instance() +{ + static SharedLibraryHandlerFactory ret; + return ret; +} + +} diff --git a/programs/library-bridge/SharedLibraryHandlerFactory.h b/programs/library-bridge/SharedLibraryHandlerFactory.h new file mode 100644 index 00000000000..473d90618a2 --- /dev/null +++ b/programs/library-bridge/SharedLibraryHandlerFactory.h @@ -0,0 +1,37 @@ +#pragma once + +#include "SharedLibraryHandler.h" +#include +#include + + +namespace DB +{ + +/// Each library dictionary source has a unique UUID. When the clone() method is called, a new UUID is generated. +/// There is a unique mapping from dictionary UUID to SharedLibraryHandler. 
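+/// Illustrative call flow (a sketch, not part of this patch; the bridge's HTTP request handlers do the real plumbing): +/// auto & factory = SharedLibraryHandlerFactory::instance(); +/// factory.create(id, path, settings, sample_block, attributes); // first load: libNew inside the library +/// factory.clone(from_id, to_id); // copy constructor triggers libClone +/// auto handler = factory.get(id); // nullptr if not registered +/// auto stream = handler->loadAll(); // serve a read request (or loadIds / loadKeys) +/// factory.remove(id); // handler destructor triggers libDelete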
+class SharedLibraryHandlerFactory final : private boost::noncopyable +{ +public: + static SharedLibraryHandlerFactory & instance(); + + SharedLibraryHandlerPtr get(const std::string & dictionary_id); + + void create( + const std::string & dictionary_id, + const std::string & library_path, + const std::vector & library_settings, + const Block & sample_block, + const std::vector & attributes_names); + + void clone(const std::string & from_dictionary_id, const std::string & to_dictionary_id); + + void remove(const std::string & dictionary_id); + +private: + /// map: dict_id -> sharedLibraryHandler + std::unordered_map library_handlers; + std::mutex mutex; +}; + +} diff --git a/programs/library-bridge/library-bridge.cpp b/programs/library-bridge/library-bridge.cpp new file mode 100644 index 00000000000..5fff2ffe525 --- /dev/null +++ b/programs/library-bridge/library-bridge.cpp @@ -0,0 +1,3 @@ +int mainEntryClickHouseLibraryBridge(int argc, char ** argv); +int main(int argc_, char ** argv_) { return mainEntryClickHouseLibraryBridge(argc_, argv_); } + diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 2909b838c84..f48e8d4d0a0 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -41,9 +42,9 @@ #include #include #include - #include +namespace fs = std::filesystem; namespace DB { @@ -71,11 +72,11 @@ void LocalServer::initialize(Poco::Util::Application & self) Poco::Util::Application::initialize(self); /// Load config files if exists - if (config().has("config-file") || Poco::File("config.xml").exists()) + if (config().has("config-file") || fs::exists("config.xml")) { const auto config_path = config().getString("config-file", "config.xml"); ConfigProcessor config_processor(config_path, false, true); - config_processor.setConfigPath(Poco::Path(config_path).makeParent().toString()); + config_processor.setConfigPath(fs::path(config_path).parent_path()); auto loaded_config = config_processor.loadConfig(); config_processor.savePreprocessedConfig(loaded_config, loaded_config.configuration->getString("path", ".")); config().add(loaded_config.configuration.duplicate(), PRIO_DEFAULT, false); @@ -99,9 +100,9 @@ void LocalServer::initialize(Poco::Util::Application & self) } } -void LocalServer::applyCmdSettings(Context & context) +void LocalServer::applyCmdSettings(ContextMutablePtr context) { - context.applySettingsChanges(cmd_settings.changes()); + context->applySettingsChanges(cmd_settings.changes()); } /// If path is specified and not empty, will try to setup server environment and load existing metadata @@ -176,7 +177,7 @@ void LocalServer::tryInitPath() } -static void attachSystemTables(const Context & context) +static void attachSystemTables(ContextPtr context) { DatabasePtr system_database = DatabaseCatalog::instance().tryGetDatabase(DatabaseCatalog::SYSTEM_DATABASE); if (!system_database) @@ -211,7 +212,7 @@ try } shared_context = Context::createShared(); - global_context = std::make_unique(Context::createGlobal(shared_context.get())); + global_context = Context::createGlobal(shared_context.get()); global_context->makeGlobalContext(); global_context->setApplicationType(Context::ApplicationType::LOCAL); tryInitPath(); @@ -274,9 +275,9 @@ try * if such tables will not be dropped, clickhouse-server will not be able to load them due to security reasons. 
*/ std::string default_database = config().getString("default_database", "_local"); - DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared(default_database, *global_context)); + DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared(default_database, global_context)); global_context->setCurrentDatabase(default_database); - applyCmdOptions(*global_context); + applyCmdOptions(global_context); if (config().has("path")) { @@ -286,17 +287,17 @@ try status.emplace(path + "status", StatusFile::write_full_info); LOG_DEBUG(log, "Loading metadata from {}", path); - Poco::File(path + "data/").createDirectories(); - Poco::File(path + "metadata/").createDirectories(); - loadMetadataSystem(*global_context); - attachSystemTables(*global_context); - loadMetadata(*global_context); + fs::create_directories(fs::path(path) / "data/"); + fs::create_directories(fs::path(path) / "metadata/"); + loadMetadataSystem(global_context); + attachSystemTables(global_context); + loadMetadata(global_context); DatabaseCatalog::instance().loadDatabases(); LOG_DEBUG(log, "Loaded metadata."); } else if (!config().has("no-system-tables")) { - attachSystemTables(*global_context); + attachSystemTables(global_context); } processQueries(); @@ -375,23 +376,46 @@ void LocalServer::processQueries() /// we can't mutate global global_context (can lead to races, as it was already passed to some background threads) /// so we can't reuse it safely as a query context and need a copy here - auto context = Context(*global_context); + auto context = Context::createCopy(global_context); - context.makeSessionContext(); - context.makeQueryContext(); + context->makeSessionContext(); + context->makeQueryContext(); - context.setUser("default", "", Poco::Net::SocketAddress{}); - context.setCurrentQueryId(""); + context->setUser("default", "", Poco::Net::SocketAddress{}); + context->setCurrentQueryId(""); applyCmdSettings(context); /// Use the same query_id (and thread group) for all queries CurrentThread::QueryScope query_scope_holder(context); + /// Set up progress indication. + progress_bar.need_render_progress = config().getBool("progress", false); + + if (progress_bar.need_render_progress) + { + context->setProgressCallback([&](const Progress & value) + { + if (!progress_bar.updateProgress(progress, value)) + { + // Just a keep-alive update. 
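+ // Nothing advanced, so leave the rendered progress bar untouched.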
+ return; + } + progress_bar.writeProgress(progress, watch.elapsed()); + }); + } + bool echo_queries = config().hasOption("echo") || config().hasOption("verbose"); std::exception_ptr exception; for (const auto & query : queries) { + watch.restart(); + progress.reset(); + progress_bar.show_progress_bar = false; + progress_bar.written_progress_chars = 0; + progress_bar.written_first_block = false; + + ReadBufferFromString read_buf(query); WriteBufferFromFileDescriptor write_buf(STDOUT_FILENO); @@ -455,7 +479,7 @@ void LocalServer::setupUsers() { ConfigurationPtr users_config; - if (config().has("users_config") || config().has("config-file") || Poco::File("config.xml").exists()) + if (config().has("users_config") || config().has("config-file") || fs::exists("config.xml")) { const auto users_config_path = config().getString("users_config", config().getString("config-file", "config.xml")); ConfigProcessor config_processor(users_config_path); @@ -548,6 +572,7 @@ void LocalServer::init(int argc, char ** argv) ("ignore-error", "do not stop processing if a query failed") ("no-system-tables", "do not attach system tables (better startup time)") ("version,V", "print version information and exit") + ("progress", "print progress of queries execution") ; cmd_settings.addProgramOptions(description); @@ -597,6 +622,8 @@ void LocalServer::init(int argc, char ** argv) if (options.count("stacktrace")) config().setBool("stacktrace", true); + if (options.count("progress")) + config().setBool("progress", true); if (options.count("echo")) config().setBool("echo", true); if (options.count("verbose")) @@ -618,9 +645,9 @@ void LocalServer::init(int argc, char ** argv) argsToConfig(arguments, config(), 100); } -void LocalServer::applyCmdOptions(Context & context) +void LocalServer::applyCmdOptions(ContextMutablePtr context) { - context.setDefaultFormat(config().getString("output-format", config().getString("format", "TSV"))); + context->setDefaultFormat(config().getString("output-format", config().getString("format", "TSV"))); applyCmdSettings(context); } diff --git a/programs/local/LocalServer.h b/programs/local/LocalServer.h index 02778bd86cb..cf8886d9652 100644 --- a/programs/local/LocalServer.h +++ b/programs/local/LocalServer.h @@ -1,13 +1,13 @@ #pragma once -#include -#include #include #include #include -#include +#include #include - +#include +#include +#include namespace DB { @@ -36,18 +36,22 @@ private: std::string getInitialCreateTableQuery(); void tryInitPath(); - void applyCmdOptions(Context & context); - void applyCmdSettings(Context & context); + void applyCmdOptions(ContextMutablePtr context); + void applyCmdSettings(ContextMutablePtr context); void processQueries(); void setupUsers(); void cleanup(); + protected: SharedContextHolder shared_context; - std::unique_ptr global_context; + ContextMutablePtr global_context; /// Settings specified via command line args Settings cmd_settings; + ProgressBar progress_bar; + Progress progress; + Stopwatch watch; std::optional temporary_directory_to_delete; }; diff --git a/programs/main.cpp b/programs/main.cpp index cbb22b7a87b..ccdf4d50fb4 100644 --- a/programs/main.cpp +++ b/programs/main.cpp @@ -55,6 +55,9 @@ int mainEntryClickHouseObfuscator(int argc, char ** argv); #if ENABLE_CLICKHOUSE_GIT_IMPORT int mainEntryClickHouseGitImport(int argc, char ** argv); #endif +#if ENABLE_CLICKHOUSE_KEEPER +int mainEntryClickHouseKeeper(int argc, char ** argv); +#endif #if ENABLE_CLICKHOUSE_INSTALL int mainEntryClickHouseInstall(int argc, char ** argv); int 
mainEntryClickHouseStart(int argc, char ** argv); @@ -112,6 +115,9 @@ std::pair clickhouse_applications[] = #if ENABLE_CLICKHOUSE_GIT_IMPORT {"git-import", mainEntryClickHouseGitImport}, #endif +#if ENABLE_CLICKHOUSE_KEEPER + {"keeper", mainEntryClickHouseKeeper}, +#endif #if ENABLE_CLICKHOUSE_INSTALL {"install", mainEntryClickHouseInstall}, {"start", mainEntryClickHouseStart}, diff --git a/programs/obfuscator/Obfuscator.cpp b/programs/obfuscator/Obfuscator.cpp index aea70ba0986..f68b255158c 100644 --- a/programs/obfuscator/Obfuscator.cpp +++ b/programs/obfuscator/Obfuscator.cpp @@ -365,16 +365,20 @@ static void transformFixedString(const UInt8 * src, UInt8 * dst, size_t size, UI } } -static void transformUUID(const UInt128 & src, UInt128 & dst, UInt64 seed) +static void transformUUID(const UUID & src_uuid, UUID & dst_uuid, UInt64 seed) { + const UInt128 & src = src_uuid.toUnderType(); + UInt128 & dst = dst_uuid.toUnderType(); + SipHash hash; hash.update(seed); - hash.update(reinterpret_cast(&src), sizeof(UInt128)); + hash.update(reinterpret_cast(&src), sizeof(UUID)); /// Saving version and variant from an old UUID hash.get128(reinterpret_cast(&dst)); - dst.high = (dst.high & 0x1fffffffffffffffull) | (src.high & 0xe000000000000000ull); - dst.low = (dst.low & 0xffffffffffff0fffull) | (src.low & 0x000000000000f000ull); + + dst.items[1] = (dst.items[1] & 0x1fffffffffffffffull) | (src.items[1] & 0xe000000000000000ull); + dst.items[0] = (dst.items[0] & 0xffffffffffff0fffull) | (src.items[0] & 0x000000000000f000ull); } class FixedStringModel : public IModel @@ -426,10 +430,10 @@ public: ColumnPtr generate(const IColumn & column) override { - const ColumnUInt128 & src_column = assert_cast(column); + const ColumnUUID & src_column = assert_cast(column); const auto & src_data = src_column.getData(); - auto res_column = ColumnUInt128::create(); + auto res_column = ColumnUUID::create(); auto & res_data = res_column->getData(); res_data.resize(src_data.size()); @@ -1129,8 +1133,8 @@ try } SharedContextHolder shared_context = Context::createShared(); - Context context = Context::createGlobal(shared_context.get()); - context.makeGlobalContext(); + auto context = Context::createGlobal(shared_context.get()); + context->makeGlobalContext(); ReadBufferFromFileDescriptor file_in(STDIN_FILENO); WriteBufferFromFileDescriptor file_out(STDOUT_FILENO); @@ -1152,7 +1156,7 @@ try if (!silent) std::cerr << "Training models\n"; - BlockInputStreamPtr input = context.getInputFormat(input_format, file_in, header, max_block_size); + BlockInputStreamPtr input = context->getInputFormat(input_format, file_in, header, max_block_size); input->readPrefix(); while (Block block = input->read()) @@ -1179,8 +1183,8 @@ try file_in.seek(0, SEEK_SET); - BlockInputStreamPtr input = context.getInputFormat(input_format, file_in, header, max_block_size); - BlockOutputStreamPtr output = context.getOutputStreamParallelIfPossible(output_format, file_out, header); + BlockInputStreamPtr input = context->getInputFormat(input_format, file_in, header, max_block_size); + BlockOutputStreamPtr output = context->getOutputStreamParallelIfPossible(output_format, file_out, header); if (processed_rows + source_rows > limit) input = std::make_shared(input, limit - processed_rows, 0); diff --git a/programs/odbc-bridge/CMakeLists.txt b/programs/odbc-bridge/CMakeLists.txt index 11864354619..7b232f2b5dc 100644 --- a/programs/odbc-bridge/CMakeLists.txt +++ b/programs/odbc-bridge/CMakeLists.txt @@ -24,12 +24,14 @@ add_executable(clickhouse-odbc-bridge 
${CLICKHOUSE_ODBC_BRIDGE_SOURCES}) target_link_libraries(clickhouse-odbc-bridge PRIVATE daemon dbms + bridge clickhouse_parsers - Poco::Data - Poco::Data::ODBC + nanodbc + unixodbc ) set_target_properties(clickhouse-odbc-bridge PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..) +target_compile_options (clickhouse-odbc-bridge PRIVATE -Wno-reserved-id-macro -Wno-keyword-macro) if (USE_GDB_ADD_INDEX) add_custom_command(TARGET clickhouse-odbc-bridge POST_BUILD COMMAND ${GDB_ADD_INDEX_EXE} ../clickhouse-odbc-bridge COMMENT "Adding .gdb-index to clickhouse-odbc-bridge" VERBATIM) diff --git a/programs/odbc-bridge/ColumnInfoHandler.cpp b/programs/odbc-bridge/ColumnInfoHandler.cpp index 14fa734f246..f4f575bb33d 100644 --- a/programs/odbc-bridge/ColumnInfoHandler.cpp +++ b/programs/odbc-bridge/ColumnInfoHandler.cpp @@ -2,29 +2,36 @@ #if USE_ODBC -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include "getIdentifierQuote.h" -# include "validateODBCConnectionString.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "getIdentifierQuote.h" +#include "validateODBCConnectionString.h" +#include "ODBCConnectionFactory.h" + +#include +#include -# define POCO_SQL_ODBC_CLASS Poco::Data::ODBC namespace DB { + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; + extern const int BAD_ARGUMENTS; +} + namespace { DataTypePtr getDataType(SQLSMALLINT type) @@ -59,6 +66,7 @@ namespace } } + void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) { HTMLForm params(request, request.getStream()); @@ -77,88 +85,79 @@ void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServ process_error("No 'table' param in request URL"); return; } + if (!params.has("connection_string")) { process_error("No 'connection_string' in request URL"); return; } + std::string schema_name; std::string table_name = params.get("table"); std::string connection_string = params.get("connection_string"); if (params.has("schema")) - { schema_name = params.get("schema"); - LOG_TRACE(log, "Will fetch info for table '{}'", schema_name + "." + table_name); - } - else - LOG_TRACE(log, "Will fetch info for table '{}'", table_name); + LOG_TRACE(log, "Got connection str '{}'", connection_string); try { const bool external_table_functions_use_nulls = Poco::NumberParser::parseBool(params.get("external_table_functions_use_nulls", "false")); - POCO_SQL_ODBC_CLASS::SessionImpl session(validateODBCConnectionString(connection_string), DBMS_DEFAULT_CONNECT_TIMEOUT_SEC); - SQLHDBC hdbc = session.dbc().handle(); + auto connection = ODBCConnectionFactory::instance().get( + validateODBCConnectionString(connection_string), + getContext()->getSettingsRef().odbc_bridge_connection_pool_size); - SQLHSTMT hstmt = nullptr; + nanodbc::catalog catalog(connection->get()); + std::string catalog_name; - if (POCO_SQL_ODBC_CLASS::Utility::isError(SQLAllocStmt(hdbc, &hstmt))) - throw POCO_SQL_ODBC_CLASS::ODBCException("Could not allocate connection handle."); - - SCOPE_EXIT(SQLFreeStmt(hstmt, SQL_DROP)); - - const auto & context_settings = context.getSettingsRef(); - - /// TODO Why not do SQLColumns instead? - std::string name = schema_name.empty() ? backQuoteIfNeed(table_name) : backQuoteIfNeed(schema_name) + "." 
+ backQuoteIfNeed(table_name); - WriteBufferFromOwnString buf; - std::string input = "SELECT * FROM " + name + " WHERE 1 = 0"; - ParserQueryWithOutput parser(input.data() + input.size()); - ASTPtr select = parseQuery(parser, input.data(), input.data() + input.size(), "", context_settings.max_query_size, context_settings.max_parser_depth); - - IAST::FormatSettings settings(buf, true); - settings.always_quote_identifiers = true; - settings.identifier_quoting_style = getQuotingStyle(hdbc); - select->format(settings); - std::string query = buf.str(); - - LOG_TRACE(log, "Inferring structure with query '{}'", query); - - if (POCO_SQL_ODBC_CLASS::Utility::isError(POCO_SQL_ODBC_CLASS::SQLPrepare(hstmt, reinterpret_cast(query.data()), query.size()))) - throw POCO_SQL_ODBC_CLASS::DescriptorException(session.dbc()); - - if (POCO_SQL_ODBC_CLASS::Utility::isError(SQLExecute(hstmt))) - throw POCO_SQL_ODBC_CLASS::StatementException(hstmt); - - SQLSMALLINT cols = 0; - if (POCO_SQL_ODBC_CLASS::Utility::isError(SQLNumResultCols(hstmt, &cols))) - throw POCO_SQL_ODBC_CLASS::StatementException(hstmt); - - /// TODO cols not checked - - NamesAndTypesList columns; - for (SQLSMALLINT ncol = 1; ncol <= cols; ++ncol) + /// In XDBC tables it is allowed to pass either database_name or schema_name in the table definition, but not both of them. + /// Both are passed as the 'schema' parameter in the request URL, so it is not clear which of the two was passed. + /// If it is schema_name, then we know the database is registered in odbc.ini. But if we have database_name as 'schema', + /// that is not guaranteed. For nanodbc, database_name must be either in odbc.ini or passed as catalog_name. + auto get_columns = [&]() { - SQLSMALLINT type = 0; - /// TODO Why 301? - SQLCHAR column_name[301]; - - SQLSMALLINT is_nullable; - const auto result = POCO_SQL_ODBC_CLASS::SQLDescribeCol(hstmt, ncol, column_name, sizeof(column_name), nullptr, &type, nullptr, nullptr, &is_nullable); - if (POCO_SQL_ODBC_CLASS::Utility::isError(result)) - throw POCO_SQL_ODBC_CLASS::StatementException(hstmt); - - auto column_type = getDataType(type); - if (external_table_functions_use_nulls && is_nullable == SQL_NULLABLE) + nanodbc::catalog::tables tables = catalog.find_tables(table_name, /* type = */ "", /* schema = */ "", /* catalog = */ schema_name); + if (tables.next()) { - column_type = std::make_shared(column_type); + catalog_name = tables.table_catalog(); + LOG_TRACE(log, "Will fetch info for table '{}.{}'", catalog_name, table_name); + return catalog.find_columns(/* column = */ "", table_name, /* schema = */ "", catalog_name); } - columns.emplace_back(reinterpret_cast(column_name), std::move(column_type)); + tables = catalog.find_tables(table_name, /* type = */ "", /* schema = */ schema_name); + if (tables.next()) + { + catalog_name = tables.table_catalog(); + LOG_TRACE(log, "Will fetch info for table '{}.{}.{}'", catalog_name, schema_name, table_name); + return catalog.find_columns(/* column = */ "", table_name, schema_name, catalog_name); + } + + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Table {} not found", schema_name.empty() ? table_name : schema_name + '.' 
+ table_name); + }; + + nanodbc::catalog::columns columns_definition = get_columns(); + + NamesAndTypesList columns; + while (columns_definition.next()) + { + SQLSMALLINT type = columns_definition.sql_data_type(); + std::string column_name = columns_definition.column_name(); + + bool is_nullable = columns_definition.nullable() == SQL_NULLABLE; + + auto column_type = getDataType(type); + + if (external_table_functions_use_nulls && is_nullable == SQL_NULLABLE) + column_type = std::make_shared(column_type); + + columns.emplace_back(column_name, std::move(column_type)); } + if (columns.empty()) + throw Exception("Columns definition was not returned", ErrorCodes::LOGICAL_ERROR); + WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout); try { diff --git a/programs/odbc-bridge/ColumnInfoHandler.h b/programs/odbc-bridge/ColumnInfoHandler.h index 9b5b470b31d..bc976f54aee 100644 --- a/programs/odbc-bridge/ColumnInfoHandler.h +++ b/programs/odbc-bridge/ColumnInfoHandler.h @@ -2,24 +2,23 @@ #if USE_ODBC -# include -# include -# include +#include +#include +#include +#include +#include -# include -/** The structure of the table is taken from the query "SELECT * FROM table WHERE 1=0". - * TODO: It would be much better to utilize ODBC methods dedicated for columns description. - * If there is no such table, an exception is thrown. - */ namespace DB { -class ODBCColumnsInfoHandler : public HTTPRequestHandler +class ODBCColumnsInfoHandler : public HTTPRequestHandler, WithContext { public: - ODBCColumnsInfoHandler(size_t keep_alive_timeout_, Context & context_) - : log(&Poco::Logger::get("ODBCColumnsInfoHandler")), keep_alive_timeout(keep_alive_timeout_), context(context_) + ODBCColumnsInfoHandler(size_t keep_alive_timeout_, ContextPtr context_) + : WithContext(context_) + , log(&Poco::Logger::get("ODBCColumnsInfoHandler")) + , keep_alive_timeout(keep_alive_timeout_) { } @@ -28,7 +27,6 @@ public: private: Poco::Logger * log; size_t keep_alive_timeout; - Context & context; }; } diff --git a/programs/odbc-bridge/HandlerFactory.cpp b/programs/odbc-bridge/HandlerFactory.cpp index 9ac48af4ace..49984453d33 100644 --- a/programs/odbc-bridge/HandlerFactory.cpp +++ b/programs/odbc-bridge/HandlerFactory.cpp @@ -8,7 +8,7 @@ namespace DB { -std::unique_ptr HandlerFactory::createRequestHandler(const HTTPServerRequest & request) +std::unique_ptr ODBCBridgeHandlerFactory::createRequestHandler(const HTTPServerRequest & request) { Poco::URI uri{request.getURI()}; LOG_TRACE(log, "Request URI: {}", uri.toString()); @@ -21,26 +21,26 @@ std::unique_ptr HandlerFactory::createRequestHandler(const H if (uri.getPath() == "/columns_info") #if USE_ODBC - return std::make_unique(keep_alive_timeout, context); + return std::make_unique(keep_alive_timeout, getContext()); #else return nullptr; #endif else if (uri.getPath() == "/identifier_quote") #if USE_ODBC - return std::make_unique(keep_alive_timeout, context); + return std::make_unique(keep_alive_timeout, getContext()); #else return nullptr; #endif else if (uri.getPath() == "/schema_allowed") #if USE_ODBC - return std::make_unique(keep_alive_timeout, context); + return std::make_unique(keep_alive_timeout, getContext()); #else return nullptr; #endif else if (uri.getPath() == "/write") - return std::make_unique(pool_map, keep_alive_timeout, context, "write"); + return std::make_unique(keep_alive_timeout, getContext(), "write"); else - return std::make_unique(pool_map, keep_alive_timeout, context, "read"); + return 
std::make_unique(keep_alive_timeout, getContext(), "read"); } return nullptr; } diff --git a/programs/odbc-bridge/HandlerFactory.h b/programs/odbc-bridge/HandlerFactory.h index 5dce6f02ecd..ffbbe3670af 100644 --- a/programs/odbc-bridge/HandlerFactory.h +++ b/programs/odbc-bridge/HandlerFactory.h @@ -1,32 +1,28 @@ #pragma once -#include +#include #include #include "ColumnInfoHandler.h" #include "IdentifierQuoteHandler.h" #include "MainHandler.h" #include "SchemaAllowedHandler.h" - #include -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#include -#pragma GCC diagnostic pop - namespace DB { /** Factory for '/ping', '/', '/columns_info', '/identifier_quote', '/schema_allowed' handlers. * Also stores Session pools for ODBC connections */ -class HandlerFactory : public HTTPRequestHandlerFactory +class ODBCBridgeHandlerFactory : public HTTPRequestHandlerFactory, WithContext { public: - HandlerFactory(const std::string & name_, size_t keep_alive_timeout_, Context & context_) - : log(&Poco::Logger::get(name_)), name(name_), keep_alive_timeout(keep_alive_timeout_), context(context_) + ODBCBridgeHandlerFactory(const std::string & name_, size_t keep_alive_timeout_, ContextPtr context_) + : WithContext(context_) + , log(&Poco::Logger::get(name_)) + , name(name_) + , keep_alive_timeout(keep_alive_timeout_) { - pool_map = std::make_shared(); } std::unique_ptr createRequestHandler(const HTTPServerRequest & request) override; @@ -35,7 +31,6 @@ private: Poco::Logger * log; std::string name; size_t keep_alive_timeout; - Context & context; - std::shared_ptr pool_map; }; + } diff --git a/programs/odbc-bridge/IdentifierQuoteHandler.cpp b/programs/odbc-bridge/IdentifierQuoteHandler.cpp index 5060d37c479..124a5c420f8 100644 --- a/programs/odbc-bridge/IdentifierQuoteHandler.cpp +++ b/programs/odbc-bridge/IdentifierQuoteHandler.cpp @@ -2,23 +2,20 @@ #if USE_ODBC -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include "getIdentifierQuote.h" -# include "validateODBCConnectionString.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "getIdentifierQuote.h" +#include "validateODBCConnectionString.h" +#include "ODBCConnectionFactory.h" -# define POCO_SQL_ODBC_CLASS Poco::Data::ODBC namespace DB { @@ -44,10 +41,12 @@ void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServ try { std::string connection_string = params.get("connection_string"); - POCO_SQL_ODBC_CLASS::SessionImpl session(validateODBCConnectionString(connection_string), DBMS_DEFAULT_CONNECT_TIMEOUT_SEC); - SQLHDBC hdbc = session.dbc().handle(); - auto identifier = getIdentifierQuote(hdbc); + auto connection = ODBCConnectionFactory::instance().get( + validateODBCConnectionString(connection_string), + getContext()->getSettingsRef().odbc_bridge_connection_pool_size); + + auto identifier = getIdentifierQuote(connection->get()); WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout); try diff --git a/programs/odbc-bridge/IdentifierQuoteHandler.h b/programs/odbc-bridge/IdentifierQuoteHandler.h index dad88c72ad8..ef3806fd802 100644 --- a/programs/odbc-bridge/IdentifierQuoteHandler.h +++ b/programs/odbc-bridge/IdentifierQuoteHandler.h @@ -11,11 +11,13 @@ namespace DB { -class IdentifierQuoteHandler : public HTTPRequestHandler +class IdentifierQuoteHandler : public 
HTTPRequestHandler, WithContext { public: - IdentifierQuoteHandler(size_t keep_alive_timeout_, Context &) - : log(&Poco::Logger::get("IdentifierQuoteHandler")), keep_alive_timeout(keep_alive_timeout_) + IdentifierQuoteHandler(size_t keep_alive_timeout_, ContextPtr context_) + : WithContext(context_) + , log(&Poco::Logger::get("IdentifierQuoteHandler")) + , keep_alive_timeout(keep_alive_timeout_) { } diff --git a/programs/odbc-bridge/MainHandler.cpp b/programs/odbc-bridge/MainHandler.cpp index 079fc371ab4..ffa636e8b49 100644 --- a/programs/odbc-bridge/MainHandler.cpp +++ b/programs/odbc-bridge/MainHandler.cpp @@ -23,13 +23,9 @@ #include -#if USE_ODBC -#include -#define POCO_SQL_ODBC_CLASS Poco::Data::ODBC -#endif - namespace DB { + namespace { std::unique_ptr parseColumns(std::string && column_string) @@ -42,37 +38,6 @@ namespace } } -using PocoSessionPoolConstructor = std::function()>; -/** Is used to adjust max size of default Poco thread pool. See issue #750 - * Acquire the lock, resize pool and construct new Session. - */ -static std::shared_ptr createAndCheckResizePocoSessionPool(PocoSessionPoolConstructor pool_constr) -{ - static std::mutex mutex; - - Poco::ThreadPool & pool = Poco::ThreadPool::defaultPool(); - - /// NOTE: The lock don't guarantee that external users of the pool don't change its capacity - std::unique_lock lock(mutex); - - if (pool.available() == 0) - pool.addCapacity(2 * std::max(pool.capacity(), 1)); - - return pool_constr(); -} - -ODBCHandler::PoolPtr ODBCHandler::getPool(const std::string & connection_str) -{ - std::lock_guard lock(mutex); - if (!pool_map->count(connection_str)) - { - pool_map->emplace(connection_str, createAndCheckResizePocoSessionPool([connection_str] - { - return std::make_shared("ODBC", validateODBCConnectionString(connection_str)); - })); - } - return pool_map->at(connection_str); -} void ODBCHandler::processError(HTTPServerResponse & response, const std::string & message) { @@ -82,12 +47,14 @@ void ODBCHandler::processError(HTTPServerResponse & response, const std::string LOG_WARNING(log, message); } + void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) { HTMLForm params(request); + LOG_TRACE(log, "Request URI: {}", request.getURI()); + if (mode == "read") params.read(request.getStream()); - LOG_TRACE(log, "Request URI: {}", request.getURI()); if (mode == "read" && !params.has("query")) { @@ -95,11 +62,6 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse return; } - if (!params.has("columns")) - { - processError(response, "No 'columns' in request URL"); - return; - } if (!params.has("connection_string")) { @@ -107,6 +69,16 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse return; } + if (!params.has("sample_block")) + { + processError(response, "No 'sample_block' in request URL"); + return; + } + + std::string format = params.get("format", "RowBinary"); + std::string connection_string = params.get("connection_string"); + LOG_TRACE(log, "Connection string: '{}'", connection_string); + UInt64 max_block_size = DEFAULT_BLOCK_SIZE; if (params.has("max_block_size")) { @@ -119,28 +91,27 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse max_block_size = parse(max_block_size_str); } - std::string columns = params.get("columns"); + std::string sample_block_string = params.get("sample_block"); std::unique_ptr sample_block; try { - sample_block = parseColumns(std::move(columns)); + sample_block = 
parseColumns(std::move(sample_block_string)); } catch (const Exception & ex) { - processError(response, "Invalid 'columns' parameter in request body '" + ex.message() + "'"); - LOG_WARNING(log, ex.getStackTraceString()); + processError(response, "Invalid 'sample_block' parameter in request body '" + ex.message() + "'"); + LOG_ERROR(log, ex.getStackTraceString()); return; } - std::string format = params.get("format", "RowBinary"); - - std::string connection_string = params.get("connection_string"); - LOG_TRACE(log, "Connection string: '{}'", connection_string); - WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout); try { + auto connection = ODBCConnectionFactory::instance().get( + validateODBCConnectionString(connection_string), + getContext()->getSettingsRef().odbc_bridge_connection_pool_size); + if (mode == "write") { if (!params.has("db_name")) @@ -159,15 +130,12 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse auto quoting_style = IdentifierQuotingStyle::None; #if USE_ODBC - POCO_SQL_ODBC_CLASS::SessionImpl session(validateODBCConnectionString(connection_string), DBMS_DEFAULT_CONNECT_TIMEOUT_SEC); - quoting_style = getQuotingStyle(session.dbc().handle()); + quoting_style = getQuotingStyle(connection->get()); #endif - - auto pool = getPool(connection_string); auto & read_buf = request.getStream(); - auto input_format = FormatFactory::instance().getInput(format, read_buf, *sample_block, context, max_block_size); + auto input_format = FormatFactory::instance().getInput(format, read_buf, *sample_block, getContext(), max_block_size); auto input_stream = std::make_shared(input_format); - ODBCBlockOutputStream output_stream(pool->get(), db_name, table_name, *sample_block, quoting_style); + ODBCBlockOutputStream output_stream(std::move(connection), db_name, table_name, *sample_block, getContext(), quoting_style); copyData(*input_stream, output_stream); writeStringBinary("Ok.", out); } @@ -176,9 +144,8 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse std::string query = params.get("query"); LOG_TRACE(log, "Query: {}", query); - BlockOutputStreamPtr writer = FormatFactory::instance().getOutputStreamParallelIfPossible(format, out, *sample_block, context); - auto pool = getPool(connection_string); - ODBCBlockInputStream inp(pool->get(), query, *sample_block, max_block_size); + BlockOutputStreamPtr writer = FormatFactory::instance().getOutputStreamParallelIfPossible(format, out, *sample_block, getContext()); + ODBCBlockInputStream inp(std::move(connection), query, *sample_block, max_block_size); copyData(inp, *writer); } } diff --git a/programs/odbc-bridge/MainHandler.h b/programs/odbc-bridge/MainHandler.h index e237ede5814..bc0fca8b9a5 100644 --- a/programs/odbc-bridge/MainHandler.h +++ b/programs/odbc-bridge/MainHandler.h @@ -1,14 +1,13 @@ #pragma once -#include +#include #include - #include -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#include -#pragma GCC diagnostic pop + +#include +#include + namespace DB { @@ -17,20 +16,16 @@ namespace DB * and also query in request body * response in RowBinary format */ -class ODBCHandler : public HTTPRequestHandler +class ODBCHandler : public HTTPRequestHandler, WithContext { public: - using PoolPtr = std::shared_ptr; - using PoolMap = std::unordered_map; - - ODBCHandler(std::shared_ptr pool_map_, + ODBCHandler( size_t keep_alive_timeout_, - Context & context_, + ContextPtr 
context_, const String & mode_) - : log(&Poco::Logger::get("ODBCHandler")) - , pool_map(pool_map_) + : WithContext(context_) + , log(&Poco::Logger::get("ODBCHandler")) , keep_alive_timeout(keep_alive_timeout_) - , context(context_) , mode(mode_) { } @@ -40,14 +35,11 @@ public: private: Poco::Logger * log; - std::shared_ptr pool_map; size_t keep_alive_timeout; - Context & context; String mode; static inline std::mutex mutex; - PoolPtr getPool(const std::string & connection_str); void processError(HTTPServerResponse & response, const std::string & message); }; diff --git a/programs/odbc-bridge/ODBCBlockInputStream.cpp b/programs/odbc-bridge/ODBCBlockInputStream.cpp index b8a4209ac94..c695c8db9cf 100644 --- a/programs/odbc-bridge/ODBCBlockInputStream.cpp +++ b/programs/odbc-bridge/ODBCBlockInputStream.cpp @@ -1,5 +1,7 @@ #include "ODBCBlockInputStream.h" #include +#include +#include #include #include #include @@ -14,137 +16,142 @@ namespace DB { namespace ErrorCodes { - extern const int NUMBER_OF_COLUMNS_DOESNT_MATCH; extern const int UNKNOWN_TYPE; } ODBCBlockInputStream::ODBCBlockInputStream( - Poco::Data::Session && session_, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_) - : session{session_} - , statement{(this->session << query_str, Poco::Data::Keywords::now)} - , result{statement} - , iterator{result.begin()} + nanodbc::ConnectionHolderPtr connection, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_) + : log(&Poco::Logger::get("ODBCBlockInputStream")) , max_block_size{max_block_size_} - , log(&Poco::Logger::get("ODBCBlockInputStream")) + , query(query_str) { - if (sample_block.columns() != result.columnCount()) - throw Exception{"RecordSet contains " + toString(result.columnCount()) + " columns while " + toString(sample_block.columns()) - + " expected", - ErrorCodes::NUMBER_OF_COLUMNS_DOESNT_MATCH}; - description.init(sample_block); -} - - -namespace -{ - using ValueType = ExternalResultDescription::ValueType; - - void insertValue(IColumn & column, const ValueType type, const Poco::Dynamic::Var & value) - { - switch (type) - { - case ValueType::vtUInt8: - assert_cast(column).insertValue(value.convert()); - break; - case ValueType::vtUInt16: - assert_cast(column).insertValue(value.convert()); - break; - case ValueType::vtUInt32: - assert_cast(column).insertValue(value.convert()); - break; - case ValueType::vtUInt64: - assert_cast(column).insertValue(value.convert()); - break; - case ValueType::vtInt8: - assert_cast(column).insertValue(value.convert()); - break; - case ValueType::vtInt16: - assert_cast(column).insertValue(value.convert()); - break; - case ValueType::vtInt32: - assert_cast(column).insertValue(value.convert()); - break; - case ValueType::vtInt64: - assert_cast(column).insertValue(value.convert()); - break; - case ValueType::vtFloat32: - assert_cast(column).insertValue(value.convert()); - break; - case ValueType::vtFloat64: - assert_cast(column).insertValue(value.convert()); - break; - case ValueType::vtString: - assert_cast(column).insert(value.convert()); - break; - case ValueType::vtDate: - { - Poco::DateTime date = value.convert(); - assert_cast(column).insertValue(UInt16{LocalDate(date.year(), date.month(), date.day()).getDayNum()}); - break; - } - case ValueType::vtDateTime: - { - Poco::DateTime datetime = value.convert(); - assert_cast(column).insertValue(DateLUT::instance().makeDateTime( - datetime.year(), datetime.month(), datetime.day(), datetime.hour(), datetime.minute(), 
datetime.second())); - break; - } - case ValueType::vtUUID: - assert_cast(column).insert(parse(value.convert())); - break; - default: - throw Exception("Unsupported value type", ErrorCodes::UNKNOWN_TYPE); - } - } - - void insertDefaultValue(IColumn & column, const IColumn & sample_column) { column.insertFrom(sample_column, 0); } + result = execute(connection->get(), NANODBC_TEXT(query)); } Block ODBCBlockInputStream::readImpl() { - if (iterator == result.end()) - return {}; - - MutableColumns columns(description.sample_block.columns()); - for (const auto i : ext::range(0, columns.size())) - columns[i] = description.sample_block.getByPosition(i).column->cloneEmpty(); + if (finished) + return Block(); + MutableColumns columns(description.sample_block.cloneEmptyColumns()); size_t num_rows = 0; - while (iterator != result.end()) + + while (true) { - Poco::Data::Row & row = *iterator; - - for (const auto idx : ext::range(0, row.fieldCount())) + if (!result.next()) { - /// TODO This is extremely slow. - const Poco::Dynamic::Var & value = row[idx]; + finished = true; + break; + } - if (!value.isEmpty()) + for (int idx = 0; idx < result.columns(); ++idx) + { + const auto & sample = description.sample_block.getByPosition(idx); + + if (!result.is_null(idx)) { - if (description.types[idx].second) + bool is_nullable = description.types[idx].second; + + if (is_nullable) { ColumnNullable & column_nullable = assert_cast(*columns[idx]); - insertValue(column_nullable.getNestedColumn(), description.types[idx].first, value); + const auto & data_type = assert_cast(*sample.type); + insertValue(column_nullable.getNestedColumn(), data_type.getNestedType(), description.types[idx].first, result, idx); column_nullable.getNullMapData().emplace_back(0); } else - insertValue(*columns[idx], description.types[idx].first, value); + { + insertValue(*columns[idx], sample.type, description.types[idx].first, result, idx); + } } else - insertDefaultValue(*columns[idx], *description.sample_block.getByPosition(idx).column); + insertDefaultValue(*columns[idx], *sample.column); } - ++iterator; - - ++num_rows; - if (num_rows == max_block_size) + if (++num_rows == max_block_size) break; } return description.sample_block.cloneWithColumns(std::move(columns)); } + +void ODBCBlockInputStream::insertValue( + IColumn & column, const DataTypePtr data_type, const ValueType type, nanodbc::result & row, size_t idx) +{ + switch (type) + { + case ValueType::vtUInt8: + assert_cast(column).insertValue(row.get(idx)); + break; + case ValueType::vtUInt16: + assert_cast(column).insertValue(row.get(idx)); + break; + case ValueType::vtUInt32: + assert_cast(column).insertValue(row.get(idx)); + break; + case ValueType::vtUInt64: + assert_cast(column).insertValue(row.get(idx)); + break; + case ValueType::vtInt8: + assert_cast(column).insertValue(row.get(idx)); + break; + case ValueType::vtInt16: + assert_cast(column).insertValue(row.get(idx)); + break; + case ValueType::vtInt32: + assert_cast(column).insertValue(row.get(idx)); + break; + case ValueType::vtInt64: + assert_cast(column).insertValue(row.get(idx)); + break; + case ValueType::vtFloat32: + assert_cast(column).insertValue(row.get(idx)); + break; + case ValueType::vtFloat64: + assert_cast(column).insertValue(row.get(idx)); + break; + case ValueType::vtFixedString:[[fallthrough]]; + case ValueType::vtString: + assert_cast(column).insert(row.get(idx)); + break; + case ValueType::vtUUID: + { + auto value = row.get(idx); + assert_cast(column).insert(parse(value.data(), value.size())); + break; + } + 
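/// Temporal and decimal values arrive from the driver as text; the cases below parse them + /// with ClickHouse's own readers and serializations. +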
case ValueType::vtDate: + assert_cast(column).insertValue(UInt16{LocalDate{row.get(idx)}.getDayNum()}); + break; + case ValueType::vtDateTime: + { + auto value = row.get(idx); + ReadBufferFromString in(value); + time_t time = 0; + readDateTimeText(time, in); + if (time < 0) + time = 0; + assert_cast(column).insertValue(time); + break; + } + case ValueType::vtDateTime64:[[fallthrough]]; + case ValueType::vtDecimal32: [[fallthrough]]; + case ValueType::vtDecimal64: [[fallthrough]]; + case ValueType::vtDecimal128: [[fallthrough]]; + case ValueType::vtDecimal256: + { + auto value = row.get(idx); + ReadBufferFromString istr(value); + data_type->getDefaultSerialization()->deserializeWholeText(column, istr, FormatSettings{}); + break; + } + default: + throw Exception("Unsupported value type", ErrorCodes::UNKNOWN_TYPE); + } +} + } diff --git a/programs/odbc-bridge/ODBCBlockInputStream.h b/programs/odbc-bridge/ODBCBlockInputStream.h index 13491e05822..26aa766dbcc 100644 --- a/programs/odbc-bridge/ODBCBlockInputStream.h +++ b/programs/odbc-bridge/ODBCBlockInputStream.h @@ -3,10 +3,8 @@ #include #include #include -#include -#include -#include #include +#include "ODBCConnectionFactory.h" namespace DB @@ -15,25 +13,32 @@ namespace DB class ODBCBlockInputStream final : public IBlockInputStream { public: - ODBCBlockInputStream( - Poco::Data::Session && session_, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_); + ODBCBlockInputStream(nanodbc::ConnectionHolderPtr connection, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_); String getName() const override { return "ODBC"; } Block getHeader() const override { return description.sample_block.cloneEmpty(); } private: + using QueryResult = std::shared_ptr; + using ValueType = ExternalResultDescription::ValueType; + Block readImpl() override; - Poco::Data::Session session; - Poco::Data::Statement statement; - Poco::Data::RecordSet result; - Poco::Data::RecordSet::Iterator iterator; + static void insertValue(IColumn & column, const DataTypePtr data_type, const ValueType type, nanodbc::result & row, size_t idx); + static void insertDefaultValue(IColumn & column, const IColumn & sample_column) + { + column.insertFrom(sample_column, 0); + } + + Poco::Logger * log; const UInt64 max_block_size; ExternalResultDescription description; - Poco::Logger * log; + nanodbc::result result; + String query; + bool finished = false; }; } diff --git a/programs/odbc-bridge/ODBCBlockOutputStream.cpp b/programs/odbc-bridge/ODBCBlockOutputStream.cpp index db3c9441419..dc965b3b2a7 100644 --- a/programs/odbc-bridge/ODBCBlockOutputStream.cpp +++ b/programs/odbc-bridge/ODBCBlockOutputStream.cpp @@ -1,5 +1,6 @@ #include "ODBCBlockOutputStream.h" +#include #include #include #include @@ -8,16 +9,14 @@ #include #include #include "getIdentifierQuote.h" +#include +#include +#include namespace DB { -namespace ErrorCodes -{ - extern const int UNKNOWN_TYPE; -} - namespace { using ValueType = ExternalResultDescription::ValueType; @@ -39,70 +38,21 @@ namespace query.IAST::format(settings); return buf.str(); } - - std::string getQuestionMarks(size_t n) - { - std::string result = "("; - for (size_t i = 0; i < n; ++i) - { - if (i > 0) - result += ","; - result += "?"; - } - return result + ")"; - } - - Poco::Dynamic::Var getVarFromField(const Field & field, const ValueType type) - { - switch (type) - { - case ValueType::vtUInt8: - return Poco::Dynamic::Var(static_cast(field.get())).convert(); - case ValueType::vtUInt16: - 
return Poco::Dynamic::Var(static_cast(field.get())).convert(); - case ValueType::vtUInt32: - return Poco::Dynamic::Var(static_cast(field.get())).convert(); - case ValueType::vtUInt64: - return Poco::Dynamic::Var(field.get()).convert(); - case ValueType::vtInt8: - return Poco::Dynamic::Var(static_cast(field.get())).convert(); - case ValueType::vtInt16: - return Poco::Dynamic::Var(static_cast(field.get())).convert(); - case ValueType::vtInt32: - return Poco::Dynamic::Var(static_cast(field.get())).convert(); - case ValueType::vtInt64: - return Poco::Dynamic::Var(field.get()).convert(); - case ValueType::vtFloat32: - return Poco::Dynamic::Var(field.get()).convert(); - case ValueType::vtFloat64: - return Poco::Dynamic::Var(field.get()).convert(); - case ValueType::vtString: - return Poco::Dynamic::Var(field.get()).convert(); - case ValueType::vtDate: - return Poco::Dynamic::Var(LocalDate(DayNum(field.get())).toString()).convert(); - case ValueType::vtDateTime: - return Poco::Dynamic::Var(DateLUT::instance().timeToString(time_t(field.get()))).convert(); - case ValueType::vtUUID: - return Poco::Dynamic::Var(UUID(field.get()).toUnderType().toHexString()).convert(); - default: - throw Exception("Unsupported value type", ErrorCodes::UNKNOWN_TYPE); - - } - __builtin_unreachable(); - } } -ODBCBlockOutputStream::ODBCBlockOutputStream(Poco::Data::Session && session_, +ODBCBlockOutputStream::ODBCBlockOutputStream(nanodbc::ConnectionHolderPtr connection_, const std::string & remote_database_name_, const std::string & remote_table_name_, const Block & sample_block_, + ContextPtr local_context_, IdentifierQuotingStyle quoting_) - : session(session_) + : log(&Poco::Logger::get("ODBCBlockOutputStream")) + , connection(std::move(connection_)) , db_name(remote_database_name_) , table_name(remote_table_name_) , sample_block(sample_block_) + , local_context(local_context_) , quoting(quoting_) - , log(&Poco::Logger::get("ODBCBlockOutputStream")) { description.init(sample_block); } @@ -114,28 +64,12 @@ Block ODBCBlockOutputStream::getHeader() const void ODBCBlockOutputStream::write(const Block & block) { - ColumnsWithTypeAndName columns; - for (size_t i = 0; i < block.columns(); ++i) - columns.push_back({block.getColumns()[i], sample_block.getDataTypes()[i], sample_block.getNames()[i]}); + WriteBufferFromOwnString values_buf; + auto writer = FormatFactory::instance().getOutputStream("Values", values_buf, sample_block, local_context); + writer->write(block); - std::vector row_to_insert(block.columns()); - Poco::Data::Statement statement(session << getInsertQuery(db_name, table_name, columns, quoting) + getQuestionMarks(block.columns())); - for (size_t i = 0; i < block.columns(); ++i) - statement.addBind(Poco::Data::Keywords::use(row_to_insert[i])); - - for (size_t i = 0; i < block.rows(); ++i) - { - for (size_t col_idx = 0; col_idx < block.columns(); ++col_idx) - { - Field val; - columns[col_idx].column->get(i, val); - if (val.isNull()) - row_to_insert[col_idx] = Poco::Dynamic::Var(); - else - row_to_insert[col_idx] = getVarFromField(val, description.types[col_idx].first); - } - statement.execute(); - } + std::string query = getInsertQuery(db_name, table_name, block.getColumnsWithTypeAndName(), quoting) + values_buf.str(); + execute(connection->get(), query); } } diff --git a/programs/odbc-bridge/ODBCBlockOutputStream.h b/programs/odbc-bridge/ODBCBlockOutputStream.h index 39e1d6f77ac..c370a0a9c7b 100644 --- a/programs/odbc-bridge/ODBCBlockOutputStream.h +++ b/programs/odbc-bridge/ODBCBlockOutputStream.h @@ -2,30 
+2,41 @@ #include #include -#include #include #include +#include +#include "ODBCConnectionFactory.h" + namespace DB { + class ODBCBlockOutputStream : public IBlockOutputStream { + public: - ODBCBlockOutputStream(Poco::Data::Session && session_, const std::string & remote_database_name_, - const std::string & remote_table_name_, const Block & sample_block_, IdentifierQuotingStyle quoting); + ODBCBlockOutputStream( + nanodbc::ConnectionHolderPtr connection_, + const std::string & remote_database_name_, + const std::string & remote_table_name_, + const Block & sample_block_, + ContextPtr local_context_, + IdentifierQuotingStyle quoting); Block getHeader() const override; void write(const Block & block) override; private: - Poco::Data::Session session; + Poco::Logger * log; + + nanodbc::ConnectionHolderPtr connection; std::string db_name; std::string table_name; Block sample_block; + ContextPtr local_context; IdentifierQuotingStyle quoting; ExternalResultDescription description; - Poco::Logger * log; }; } diff --git a/programs/odbc-bridge/ODBCBridge.cpp b/programs/odbc-bridge/ODBCBridge.cpp index 8869a2639c1..0deefe46014 100644 --- a/programs/odbc-bridge/ODBCBridge.cpp +++ b/programs/odbc-bridge/ODBCBridge.cpp @@ -1,244 +1,4 @@ #include "ODBCBridge.h" -#include "HandlerFactory.h" - -#include -#include -#include -#include - -#if USE_ODBC -// It doesn't make much sense to build this bridge without ODBC, but we still do this. -# include -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -namespace ErrorCodes -{ - extern const int ARGUMENT_OUT_OF_BOUND; -} - -namespace -{ - Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port, Poco::Logger * log) - { - Poco::Net::SocketAddress socket_address; - try - { - socket_address = Poco::Net::SocketAddress(host, port); - } - catch (const Poco::Net::DNSException & e) - { - const auto code = e.code(); - if (code == EAI_FAMILY -#if defined(EAI_ADDRFAMILY) - || code == EAI_ADDRFAMILY -#endif - ) - { - LOG_ERROR(log, "Cannot resolve listen_host ({}), error {}: {}. If it is an IPv6 address and your host has disabled IPv6, then consider to specify IPv4 address to listen in element of configuration file. 
Example: 0.0.0.0", host, e.code(), e.message()); - } - - throw; - } - return socket_address; - } - - Poco::Net::SocketAddress socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, Poco::Logger * log) - { - auto address = makeSocketAddress(host, port, log); -#if POCO_VERSION < 0x01080000 - socket.bind(address, /* reuseAddress = */ true); -#else - socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ false); -#endif - - socket.listen(/* backlog = */ 64); - - return address; - } -} - -void ODBCBridge::handleHelp(const std::string &, const std::string &) -{ - Poco::Util::HelpFormatter help_formatter(options()); - help_formatter.setCommand(commandName()); - help_formatter.setHeader("HTTP-proxy for odbc requests"); - help_formatter.setUsage("--http-port "); - help_formatter.format(std::cerr); - - stopOptionsProcessing(); -} - - -void ODBCBridge::defineOptions(Poco::Util::OptionSet & options) -{ - options.addOption(Poco::Util::Option("http-port", "", "port to listen").argument("http-port", true).binding("http-port")); - options.addOption( - Poco::Util::Option("listen-host", "", "hostname or address to listen, default 127.0.0.1").argument("listen-host").binding("listen-host")); - options.addOption( - Poco::Util::Option("http-timeout", "", "http timeout for socket, default 1800").argument("http-timeout").binding("http-timeout")); - - options.addOption(Poco::Util::Option("max-server-connections", "", "max connections to server, default 1024") - .argument("max-server-connections") - .binding("max-server-connections")); - options.addOption(Poco::Util::Option("keep-alive-timeout", "", "keepalive timeout, default 10") - .argument("keep-alive-timeout") - .binding("keep-alive-timeout")); - - options.addOption(Poco::Util::Option("log-level", "", "sets log level, default info").argument("log-level").binding("logger.level")); - - options.addOption( - Poco::Util::Option("log-path", "", "log path for all logs, default console").argument("log-path").binding("logger.log")); - - options.addOption(Poco::Util::Option("err-log-path", "", "err log path for all logs, default no") - .argument("err-log-path") - .binding("logger.errorlog")); - - options.addOption(Poco::Util::Option("stdout-path", "", "stdout log path, default console") - .argument("stdout-path") - .binding("logger.stdout")); - - options.addOption(Poco::Util::Option("stderr-path", "", "stderr log path, default console") - .argument("stderr-path") - .binding("logger.stderr")); - - using Me = std::decay_t; - options.addOption(Poco::Util::Option("help", "", "produce this help message") - .binding("help") - .callback(Poco::Util::OptionCallback(this, &Me::handleHelp))); - - ServerApplication::defineOptions(options); // NOLINT Don't need complex BaseDaemon's .xml config -} - -void ODBCBridge::initialize(Application & self) -{ - BaseDaemon::closeFDs(); - is_help = config().has("help"); - - if (is_help) - return; - - config().setString("logger", "ODBCBridge"); - - /// Redirect stdout, stderr to specified files. - /// Some libraries and sanitizers write to stderr in case of errors. - const auto stdout_path = config().getString("logger.stdout", ""); - if (!stdout_path.empty()) - { - if (!freopen(stdout_path.c_str(), "a+", stdout)) - throw Poco::OpenFileException("Cannot attach stdout to " + stdout_path); - - /// Disable buffering for stdout. 
- setbuf(stdout, nullptr); - } - const auto stderr_path = config().getString("logger.stderr", ""); - if (!stderr_path.empty()) - { - if (!freopen(stderr_path.c_str(), "a+", stderr)) - throw Poco::OpenFileException("Cannot attach stderr to " + stderr_path); - - /// Disable buffering for stderr. - setbuf(stderr, nullptr); - } - - buildLoggers(config(), logger(), self.commandName()); - - BaseDaemon::logRevision(); - - log = &logger(); - hostname = config().getString("listen-host", "127.0.0.1"); - port = config().getUInt("http-port"); - if (port > 0xFFFF) - throw Exception("Out of range 'http-port': " + std::to_string(port), ErrorCodes::ARGUMENT_OUT_OF_BOUND); - - http_timeout = config().getUInt("http-timeout", DEFAULT_HTTP_READ_BUFFER_TIMEOUT); - max_server_connections = config().getUInt("max-server-connections", 1024); - keep_alive_timeout = config().getUInt("keep-alive-timeout", 10); - - initializeTerminationAndSignalProcessing(); - -#if USE_ODBC - // It doesn't make much sense to build this bridge without ODBC, but we - // still do this. - Poco::Data::ODBC::Connector::registerConnector(); -#endif - - ServerApplication::initialize(self); // NOLINT -} - -void ODBCBridge::uninitialize() -{ - BaseDaemon::uninitialize(); -} - -int ODBCBridge::main(const std::vector & /*args*/) -{ - if (is_help) - return Application::EXIT_OK; - - registerFormats(); - - LOG_INFO(log, "Starting up"); - Poco::Net::ServerSocket socket; - auto address = socketBindListen(socket, hostname, port, log); - socket.setReceiveTimeout(http_timeout); - socket.setSendTimeout(http_timeout); - Poco::ThreadPool server_pool(3, max_server_connections); - Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams; - http_params->setTimeout(http_timeout); - http_params->setKeepAliveTimeout(keep_alive_timeout); - - auto shared_context = Context::createShared(); - Context context(Context::createGlobal(shared_context.get())); - context.makeGlobalContext(); - - if (config().has("query_masking_rules")) - { - SensitiveDataMasker::setInstance(std::make_unique(config(), "query_masking_rules")); - } - - auto server = HTTPServer( - context, - std::make_shared("ODBCRequestHandlerFactory-factory", keep_alive_timeout, context), - server_pool, - socket, - http_params); - server.start(); - - LOG_INFO(log, "Listening http://{}", address.toString()); - - SCOPE_EXIT({ - LOG_DEBUG(log, "Received termination signal."); - LOG_DEBUG(log, "Waiting for current connections to close."); - server.stop(); - for (size_t count : ext::range(1, 6)) - { - if (server.currentConnections() == 0) - break; - LOG_DEBUG(log, "Waiting for {} connections, try {}", server.currentConnections(), count); - std::this_thread::sleep_for(std::chrono::milliseconds(1000)); - } - }); - - waitForTerminationRequest(); - return Application::EXIT_OK; -} -} #pragma GCC diagnostic ignored "-Wmissing-declarations" int mainEntryClickHouseODBCBridge(int argc, char ** argv) diff --git a/programs/odbc-bridge/ODBCBridge.h b/programs/odbc-bridge/ODBCBridge.h index 9a0d37fa0f9..b17051dce91 100644 --- a/programs/odbc-bridge/ODBCBridge.h +++ b/programs/odbc-bridge/ODBCBridge.h @@ -2,38 +2,25 @@ #include #include -#include +#include +#include "HandlerFactory.h" + namespace DB { -/** Class represents clickhouse-odbc-bridge server, which listen - * incoming HTTP POST and GET requests on specified port and host. 
- * Has two handlers '/' for all incoming POST requests to ODBC driver - * and /ping for GET request about service status - */ -class ODBCBridge : public BaseDaemon + +class ODBCBridge : public IBridge { -public: - void defineOptions(Poco::Util::OptionSet & options) override; protected: - void initialize(Application & self) override; + std::string bridgeName() const override + { + return "ODBCBridge"; + } - void uninitialize() override; - - int main(const std::vector & args) override; - -private: - void handleHelp(const std::string &, const std::string &); - - bool is_help; - std::string hostname; - size_t port; - size_t http_timeout; - std::string log_level; - size_t max_server_connections; - size_t keep_alive_timeout; - - Poco::Logger * log; + HandlerFactoryPtr getHandlerFactoryPtr(ContextPtr context) const override + { + return std::make_shared("ODBCRequestHandlerFactory-factory", keep_alive_timeout, context); + } }; } diff --git a/programs/odbc-bridge/ODBCConnectionFactory.h b/programs/odbc-bridge/ODBCConnectionFactory.h new file mode 100644 index 00000000000..41ed5f1b31f --- /dev/null +++ b/programs/odbc-bridge/ODBCConnectionFactory.h @@ -0,0 +1,98 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace DB +{ +namespace ErrorCodes +{ + extern const int NO_FREE_CONNECTION; +} +} + +namespace nanodbc +{ + +using ConnectionPtr = std::unique_ptr; +using Pool = BorrowedObjectPool; +using PoolPtr = std::shared_ptr; + +class ConnectionHolder +{ +public: + ConnectionHolder(PoolPtr pool_, ConnectionPtr connection_) : pool(pool_), connection(std::move(connection_)) {} + + ConnectionHolder(const ConnectionHolder & other) = delete; + + ~ConnectionHolder() { pool->returnObject(std::move(connection)); } + + nanodbc::connection & get() const + { + assert(connection != nullptr); + return *connection; + } + +private: + PoolPtr pool; + ConnectionPtr connection; +}; + +using ConnectionHolderPtr = std::unique_ptr; +} + + +namespace DB +{ + +static constexpr inline auto ODBC_CONNECT_TIMEOUT = 100; +static constexpr inline auto ODBC_POOL_WAIT_TIMEOUT = 10000; + +class ODBCConnectionFactory final : private boost::noncopyable +{ +public: + static ODBCConnectionFactory & instance() + { + static ODBCConnectionFactory ret; + return ret; + } + + nanodbc::ConnectionHolderPtr get(const std::string & connection_string, size_t pool_size) + { + std::lock_guard lock(mutex); + + if (!factory.count(connection_string)) + factory.emplace(std::make_pair(connection_string, std::make_shared(pool_size))); + + auto & pool = factory[connection_string]; + + nanodbc::ConnectionPtr connection; + auto connection_available = pool->tryBorrowObject(connection, []() { return nullptr; }, ODBC_POOL_WAIT_TIMEOUT); + + if (!connection_available) + throw Exception("Unable to fetch connection within the timeout", ErrorCodes::NO_FREE_CONNECTION); + + try + { + if (!connection || !connection->connected()) + connection = std::make_unique(connection_string, ODBC_CONNECT_TIMEOUT); + } + catch (...) 
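+    // If establishing a fresh connection throws, the slot borrowed from the pool
+    // is still returned in the handler below, so the pool does not leak a permit
+    // (a leaked permit would eventually make every caller time out with
+    // NO_FREE_CONNECTION). Note that the exception is swallowed here and a holder
+    // around the possibly-null connection is handed back to the caller.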
+ { + pool->returnObject(std::move(connection)); + } + + return std::make_unique(factory[connection_string], std::move(connection)); + } + +private: + /// [connection_settings_string] -> [connection_pool] + using PoolFactory = std::unordered_map; + PoolFactory factory; + std::mutex mutex; +}; + +} diff --git a/programs/odbc-bridge/SchemaAllowedHandler.cpp b/programs/odbc-bridge/SchemaAllowedHandler.cpp index d4a70db61f4..3a20148780d 100644 --- a/programs/odbc-bridge/SchemaAllowedHandler.cpp +++ b/programs/odbc-bridge/SchemaAllowedHandler.cpp @@ -2,33 +2,26 @@ #if USE_ODBC -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include "validateODBCConnectionString.h" +#include +#include +#include +#include +#include +#include +#include "validateODBCConnectionString.h" +#include "ODBCConnectionFactory.h" +#include +#include -# define POCO_SQL_ODBC_CLASS Poco::Data::ODBC namespace DB { namespace { - bool isSchemaAllowed(SQLHDBC hdbc) + bool isSchemaAllowed(nanodbc::connection & connection) { - SQLUINTEGER value; - SQLSMALLINT value_length = sizeof(value); - SQLRETURN r = POCO_SQL_ODBC_CLASS::SQLGetInfo(hdbc, SQL_SCHEMA_USAGE, &value, sizeof(value), &value_length); - - if (POCO_SQL_ODBC_CLASS::Utility::isError(r)) - throw POCO_SQL_ODBC_CLASS::ConnectionException(hdbc); - - return value != 0; + uint32_t result = connection.get_info(SQL_SCHEMA_USAGE); + return result != 0; } } @@ -55,10 +48,12 @@ void SchemaAllowedHandler::handleRequest(HTTPServerRequest & request, HTTPServer try { std::string connection_string = params.get("connection_string"); - POCO_SQL_ODBC_CLASS::SessionImpl session(validateODBCConnectionString(connection_string), DBMS_DEFAULT_CONNECT_TIMEOUT_SEC); - SQLHDBC hdbc = session.dbc().handle(); - bool result = isSchemaAllowed(hdbc); + auto connection = ODBCConnectionFactory::instance().get( + validateODBCConnectionString(connection_string), + getContext()->getSettingsRef().odbc_bridge_connection_pool_size); + + bool result = isSchemaAllowed(connection->get()); WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout); try diff --git a/programs/odbc-bridge/SchemaAllowedHandler.h b/programs/odbc-bridge/SchemaAllowedHandler.h index 91eddf67803..d7b922ed05b 100644 --- a/programs/odbc-bridge/SchemaAllowedHandler.h +++ b/programs/odbc-bridge/SchemaAllowedHandler.h @@ -1,22 +1,25 @@ #pragma once +#include #include - #include #if USE_ODBC + namespace DB { class Context; /// This handler establishes connection to database, and retrieves whether schema is allowed. 
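/// It borrows a pooled nanodbc connection from ODBCConnectionFactory and reports
/// the result of get_info(SQL_SCHEMA_USAGE) as a boolean. A minimal sketch of the
/// call sequence, mirroring handleRequest in SchemaAllowedHandler.cpp above:
///
///     auto connection = ODBCConnectionFactory::instance().get(
///         validateODBCConnectionString(connection_string),
///         getContext()->getSettingsRef().odbc_bridge_connection_pool_size);
///     bool result = isSchemaAllowed(connection->get());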
-class SchemaAllowedHandler : public HTTPRequestHandler +class SchemaAllowedHandler : public HTTPRequestHandler, WithContext { public: - SchemaAllowedHandler(size_t keep_alive_timeout_, Context &) - : log(&Poco::Logger::get("SchemaAllowedHandler")), keep_alive_timeout(keep_alive_timeout_) + SchemaAllowedHandler(size_t keep_alive_timeout_, ContextPtr context_) + : WithContext(context_) + , log(&Poco::Logger::get("SchemaAllowedHandler")) + , keep_alive_timeout(keep_alive_timeout_) { } diff --git a/programs/odbc-bridge/getIdentifierQuote.cpp b/programs/odbc-bridge/getIdentifierQuote.cpp index 15b3749d37d..586e3c4e5dd 100644 --- a/programs/odbc-bridge/getIdentifierQuote.cpp +++ b/programs/odbc-bridge/getIdentifierQuote.cpp @@ -2,11 +2,9 @@ #if USE_ODBC -# include -# include -# include - -# define POCO_SQL_ODBC_CLASS Poco::Data::ODBC +#include +#include +#include namespace DB @@ -17,33 +15,27 @@ namespace ErrorCodes extern const int ILLEGAL_TYPE_OF_ARGUMENT; } -std::string getIdentifierQuote(SQLHDBC hdbc) + +std::string getIdentifierQuote(nanodbc::connection & connection) { - std::string identifier; - - SQLSMALLINT t; - SQLRETURN r = POCO_SQL_ODBC_CLASS::SQLGetInfo(hdbc, SQL_IDENTIFIER_QUOTE_CHAR, nullptr, 0, &t); - - if (POCO_SQL_ODBC_CLASS::Utility::isError(r)) - throw POCO_SQL_ODBC_CLASS::ConnectionException(hdbc); - - if (t > 0) + std::string quote; + try { - // I have no idea, why to add '2' here, got from: contrib/poco/Data/ODBC/src/ODBCStatementImpl.cpp:60 (SQL_DRIVER_NAME) - identifier.resize(static_cast(t) + 2); - - if (POCO_SQL_ODBC_CLASS::Utility::isError(POCO_SQL_ODBC_CLASS::SQLGetInfo( - hdbc, SQL_IDENTIFIER_QUOTE_CHAR, &identifier[0], SQLSMALLINT((identifier.length() - 1) * sizeof(identifier[0])), &t))) - throw POCO_SQL_ODBC_CLASS::ConnectionException(hdbc); - - identifier.resize(static_cast(t)); + quote = connection.get_info(SQL_IDENTIFIER_QUOTE_CHAR); } - return identifier; + catch (...) + { + LOG_WARNING(&Poco::Logger::get("ODBCGetIdentifierQuote"), "Cannot fetch identifier quote. Default double quote is used. 
Reason: {}", getCurrentExceptionMessage(false)); + return "\""; + } + + return quote; } -IdentifierQuotingStyle getQuotingStyle(SQLHDBC hdbc) + +IdentifierQuotingStyle getQuotingStyle(nanodbc::connection & connection) { - auto identifier_quote = getIdentifierQuote(hdbc); + auto identifier_quote = getIdentifierQuote(connection); if (identifier_quote.length() == 0) return IdentifierQuotingStyle::None; else if (identifier_quote[0] == '`') diff --git a/programs/odbc-bridge/getIdentifierQuote.h b/programs/odbc-bridge/getIdentifierQuote.h index 0fb4c3bddb1..7f7156eff82 100644 --- a/programs/odbc-bridge/getIdentifierQuote.h +++ b/programs/odbc-bridge/getIdentifierQuote.h @@ -2,20 +2,19 @@ #if USE_ODBC -# include -# include -# include - -# include - +#include +#include +#include #include +#include + namespace DB { -std::string getIdentifierQuote(SQLHDBC hdbc); +std::string getIdentifierQuote(nanodbc::connection & connection); -IdentifierQuotingStyle getQuotingStyle(SQLHDBC hdbc); +IdentifierQuotingStyle getQuotingStyle(nanodbc::connection & connection); } diff --git a/programs/server/.gitignore b/programs/server/.gitignore index b774776e4be..ddc480e4b29 100644 --- a/programs/server/.gitignore +++ b/programs/server/.gitignore @@ -1,8 +1,11 @@ -/access -/dictionaries_lib -/flags -/format_schemas +/metadata /metadata_dropped +/data +/store +/access +/flags +/dictionaries_lib +/format_schemas /preprocessed_configs /shadow /tmp diff --git a/programs/server/CMakeLists.txt b/programs/server/CMakeLists.txt index 198d9081168..f7f76fdb450 100644 --- a/programs/server/CMakeLists.txt +++ b/programs/server/CMakeLists.txt @@ -19,6 +19,7 @@ set (CLICKHOUSE_SERVER_LINK clickhouse_storages_system clickhouse_table_functions string_utils + jemalloc ${LINK_RESOURCE_LIB} @@ -28,34 +29,6 @@ set (CLICKHOUSE_SERVER_LINK clickhouse_program_add(server) -install(FILES config.xml users.xml DESTINATION ${CLICKHOUSE_ETC_DIR}/clickhouse-server COMPONENT clickhouse) +install(FILES config.xml users.xml DESTINATION "${CLICKHOUSE_ETC_DIR}/clickhouse-server" COMPONENT clickhouse) -# TODO We actually need this on Mac, FreeBSD. -if (OS_LINUX) - # Embed default config files as a resource into the binary. - # This is needed for two purposes: - # 1. Allow to run the binary without download of any other files. - # 2. Allow to implement "sudo clickhouse install" tool. - - foreach(RESOURCE_FILE config.xml users.xml embedded.xml play.html) - set(RESOURCE_OBJ ${RESOURCE_FILE}.o) - set(RESOURCE_OBJS ${RESOURCE_OBJS} ${RESOURCE_OBJ}) - - # https://stackoverflow.com/questions/14776463/compile-and-add-an-object-file-from-a-binary-with-cmake - add_custom_command(OUTPUT ${RESOURCE_OBJ} - COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && ${OBJCOPY_PATH} -I binary ${OBJCOPY_ARCH_OPTIONS} ${RESOURCE_FILE} ${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ} - COMMAND ${OBJCOPY_PATH} --rename-section .data=.rodata,alloc,load,readonly,data,contents - ${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ} ${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}) - - set_source_files_properties(${RESOURCE_OBJ} PROPERTIES EXTERNAL_OBJECT true GENERATED true) - endforeach(RESOURCE_FILE) - - add_library(clickhouse_server_configs STATIC ${RESOURCE_OBJS}) - set_target_properties(clickhouse_server_configs PROPERTIES LINKER_LANGUAGE C) - - # whole-archive prevents symbols from being discarded for unknown reason - # CMake can shuffle each of target_link_libraries arguments with other - # libraries in linker command. To avoid this we hardcode whole-archive - # library into single string. 
- add_dependencies(clickhouse-server-lib clickhouse_server_configs) -endif () +clickhouse_embed_binaries(server config.xml users.xml embedded.xml play.html) diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index b54e882c699..f169d793ee9 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -13,7 +13,9 @@ #include #include #include +#include #include +#include #include #include #include @@ -47,6 +49,7 @@ #include #include #include +#include #include #include #include @@ -71,6 +74,7 @@ #include #include #include +#include #if !defined(ARCADIA_BUILD) @@ -85,6 +89,8 @@ # include # include # include +# include +# include #endif #if USE_SSL @@ -97,7 +103,11 @@ #endif #if USE_NURAFT -# include +# include +#endif + +#if USE_JEMALLOC +# include #endif namespace CurrentMetrics @@ -108,11 +118,37 @@ namespace CurrentMetrics extern const Metric MaxDDLEntryID; } +namespace fs = std::filesystem; + +#if USE_JEMALLOC +static bool jemallocOptionEnabled(const char *name) +{ + bool value; + size_t size = sizeof(value); + + if (mallctl(name, reinterpret_cast(&value), &size, /* newp= */ nullptr, /* newlen= */ 0)) + throw Poco::SystemException("mallctl() failed"); + + return value; +} +#else +static bool jemallocOptionEnabled(const char *) { return 0; } +#endif + int mainEntryClickHouseServer(int argc, char ** argv) { DB::Server app; + if (jemallocOptionEnabled("opt.background_thread")) + { + LOG_ERROR(&app.logger(), + "jemalloc.background_thread was requested, " + "however ClickHouse uses percpu_arena and background_thread most likely will not give any benefits, " + "and also background_thread is not compatible with ClickHouse watchdog " + "(that can be disabled with CLICKHOUSE_WATCHDOG_ENABLE=0)"); + } + /// Do not fork separate process from watchdog if we attached to terminal. /// Otherwise it breaks gdb usage. /// Can be overridden by environment variable (cannot use server config at this moment). @@ -150,19 +186,19 @@ void setupTmpPath(Poco::Logger * log, const std::string & path) { LOG_DEBUG(log, "Setting up {} to store temporary data in it", path); - Poco::File(path).createDirectories(); + fs::create_directories(path); /// Clearing old temporary files. 
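    /// (The scan below now uses std::filesystem::directory_iterator instead of
    /// Poco::DirectoryIterator: only regular files whose names start with "tmp"
    /// are removed, anything else found in the directory is logged and kept.)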
- Poco::DirectoryIterator dir_end; - for (Poco::DirectoryIterator it(path); it != dir_end; ++it) + fs::directory_iterator dir_end; + for (fs::directory_iterator it(path); it != dir_end; ++it) { - if (it->isFile() && startsWith(it.name(), "tmp")) + if (it->is_regular_file() && startsWith(it->path().filename(), "tmp")) { - LOG_DEBUG(log, "Removing old temporary file {}", it->path()); - it->remove(); + LOG_DEBUG(log, "Removing old temporary file {}", it->path().string()); + fs::remove(it->path()); } else - LOG_DEBUG(log, "Skipped file in temporary path {}", it->path()); + LOG_DEBUG(log, "Skipped file in temporary path {}", it->path().string()); } } @@ -172,18 +208,24 @@ int waitServersToFinish(std::vector & servers, size_t const int sleep_one_ms = 100; int sleep_current_ms = 0; int current_connections = 0; - while (sleep_current_ms < sleep_max_ms) + for (;;) { current_connections = 0; + for (auto & server : servers) { server.stop(); current_connections += server.currentConnections(); } + if (!current_connections) break; + sleep_current_ms += sleep_one_ms; - std::this_thread::sleep_for(std::chrono::milliseconds(sleep_one_ms)); + if (sleep_current_ms < sleep_max_ms) + std::this_thread::sleep_for(std::chrono::milliseconds(sleep_one_ms)); + else + break; } return current_connections; } @@ -347,6 +389,11 @@ void Server::initialize(Poco::Util::Application & self) { BaseDaemon::initialize(self); logger().information("starting up"); + + LOG_INFO(&logger(), "OS Name = {}, OS Version = {}, OS Architecture = {}", + Poco::Environment::osName(), + Poco::Environment::osVersion(), + Poco::Environment::osArchitecture()); } std::string Server::getDefaultCorePath() const @@ -391,6 +438,19 @@ void checkForUsersNotInMainConfig( } +[[noreturn]] void forceShutdown() +{ +#if defined(THREAD_SANITIZER) && defined(OS_LINUX) + /// Thread sanitizer tries to do something on exit that we don't need if we want to exit immediately, + /// while connection handling threads are still run. + (void)syscall(SYS_exit_group, 0); + __builtin_unreachable(); +#else + _exit(0); +#endif +} + + int Server::main(const std::vector & /*args*/) { Poco::Logger * log = &logger(); @@ -425,8 +485,7 @@ int Server::main(const std::vector & /*args*/) * settings, available functions, data types, aggregate functions, databases, ... */ auto shared_context = Context::createShared(); - auto global_context = std::make_unique(Context::createGlobal(shared_context.get())); - global_context_ptr = global_context.get(); + global_context = Context::createGlobal(shared_context.get()); global_context->makeGlobalContext(); global_context->setApplicationType(Context::ApplicationType::SERVER); @@ -622,37 +681,38 @@ int Server::main(const std::vector & /*args*/) * Examples: do repair of local data; clone all replicated tables from replica. */ { - Poco::File(path + "flags/").createDirectories(); - global_context->setFlagsPath(path + "flags/"); + auto flags_path = fs::path(path) / "flags/"; + fs::create_directories(flags_path); + global_context->setFlagsPath(flags_path); } /** Directory with user provided files that are usable by 'file' table function. 
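      * If 'user_files_path' is not set in the config, it defaults to
      * <path>/user_files/ (see the getString call below).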
*/ { - std::string user_files_path = config().getString("user_files_path", path + "user_files/"); + std::string user_files_path = config().getString("user_files_path", fs::path(path) / "user_files/"); global_context->setUserFilesPath(user_files_path); - Poco::File(user_files_path).createDirectories(); + fs::create_directories(user_files_path); } { - std::string dictionaries_lib_path = config().getString("dictionaries_lib_path", path + "dictionaries_lib/"); + std::string dictionaries_lib_path = config().getString("dictionaries_lib_path", fs::path(path) / "dictionaries_lib/"); global_context->setDictionariesLibPath(dictionaries_lib_path); - Poco::File(dictionaries_lib_path).createDirectories(); + fs::create_directories(dictionaries_lib_path); } /// top_level_domains_lists { - const std::string & top_level_domains_path = config().getString("top_level_domains_path", path + "top_level_domains/") + "/"; - TLDListsHolder::getInstance().parseConfig(top_level_domains_path, config()); + const std::string & top_level_domains_path = config().getString("top_level_domains_path", fs::path(path) / "top_level_domains/"); + TLDListsHolder::getInstance().parseConfig(fs::path(top_level_domains_path) / "", config()); } { - Poco::File(path + "data/").createDirectories(); - Poco::File(path + "metadata/").createDirectories(); + fs::create_directories(fs::path(path) / "data/"); + fs::create_directories(fs::path(path) / "metadata/"); /// Directory with metadata of tables, which was marked as dropped by Atomic database - Poco::File(path + "metadata_dropped/").createDirectories(); + fs::create_directories(fs::path(path) / "metadata_dropped/"); } if (config().has("interserver_http_port") && config().has("interserver_https_port")) @@ -688,16 +748,8 @@ int Server::main(const std::vector & /*args*/) } } - if (config().has("interserver_http_credentials")) - { - String user = config().getString("interserver_http_credentials.user", ""); - String password = config().getString("interserver_http_credentials.password", ""); - - if (user.empty()) - throw Exception("Configuration parameter interserver_http_credentials user can't be empty", ErrorCodes::NO_ELEMENTS_IN_CONFIG); - - global_context->setInterserverCredentials(user, password); - } + LOG_DEBUG(log, "Initiailizing interserver credentials."); + global_context->updateInterserverCredentials(config()); if (config().has("macros")) global_context->setMacros(std::make_unique(config(), "macros", log)); @@ -758,6 +810,7 @@ int Server::main(const std::vector & /*args*/) global_context->setClustersConfig(config); global_context->setMacros(std::make_unique(*config, "macros", log)); global_context->setExternalAuthenticatorsConfig(*config); + global_context->setExternalModelsConfig(config); /// Setup protection to avoid accidental DROP for big tables (that are greater than 50 GB by default) if (config->has("max_table_size_to_drop")) @@ -777,6 +830,7 @@ int Server::main(const std::vector & /*args*/) } global_context->updateStorageConfiguration(*config); + global_context->updateInterserverCredentials(*config); }, /* already_loaded = */ false); /// Reload it right now (initial loading) @@ -835,14 +889,15 @@ int Server::main(const std::vector & /*args*/) global_context->setMMappedFileCache(mmap_cache_size); #if USE_EMBEDDED_COMPILER - size_t compiled_expression_cache_size = config().getUInt64("compiled_expression_cache_size", 500); + constexpr size_t compiled_expression_cache_size_default = 1024 * 1024 * 1024; + size_t compiled_expression_cache_size = 
config().getUInt64("compiled_expression_cache_size", compiled_expression_cache_size_default); CompiledExpressionCacheFactory::instance().init(compiled_expression_cache_size); #endif /// Set path for format schema files - auto format_schema_path = Poco::File(config().getString("format_schema_path", path + "format_schemas/")); - global_context->setFormatSchemaPath(format_schema_path.path()); - format_schema_path.createDirectories(); + fs::path format_schema_path(config().getString("format_schema_path", fs::path(path) / "format_schemas/")); + global_context->setFormatSchemaPath(format_schema_path); + fs::create_directories(format_schema_path); /// Check sanity of MergeTreeSettings on server startup global_context->getMergeTreeSettings().sanityCheck(settings); @@ -867,15 +922,15 @@ int Server::main(const std::vector & /*args*/) listen_try = true; } - if (config().has("test_keeper_server")) + if (config().has("keeper_server")) { #if USE_NURAFT /// Initialize test keeper RAFT. Do nothing if no nu_keeper_server in config. - global_context->initializeNuKeeperStorageDispatcher(); + global_context->initializeKeeperStorageDispatcher(); for (const auto & listen_host : listen_hosts) { - /// TCP NuKeeper - const char * port_name = "test_keeper_server.tcp_port"; + /// TCP Keeper + const char * port_name = "keeper_server.tcp_port"; createServer(listen_host, port_name, listen_try, [&](UInt16 port) { Poco::Net::ServerSocket socket; @@ -885,9 +940,29 @@ int Server::main(const std::vector & /*args*/) servers_to_start_before_tables->emplace_back( port_name, std::make_unique( - new NuKeeperTCPHandlerFactory(*this), server_pool, socket, new Poco::Net::TCPServerParams)); + new KeeperTCPHandlerFactory(*this, false), server_pool, socket, new Poco::Net::TCPServerParams)); - LOG_INFO(log, "Listening for connections to NuKeeper (tcp): {}", address.toString()); + LOG_INFO(log, "Listening for connections to Keeper (tcp): {}", address.toString()); + }); + + const char * secure_port_name = "keeper_server.tcp_port_secure"; + createServer(listen_host, secure_port_name, listen_try, [&](UInt16 port) + { +#if USE_SSL + Poco::Net::SecureServerSocket socket; + auto address = socketBindListen(socket, listen_host, port, /* secure = */ true); + socket.setReceiveTimeout(settings.receive_timeout); + socket.setSendTimeout(settings.send_timeout); + servers_to_start_before_tables->emplace_back( + secure_port_name, + std::make_unique( + new KeeperTCPHandlerFactory(*this, true), server_pool, socket, new Poco::Net::TCPServerParams)); + LOG_INFO(log, "Listening for connections to Keeper with secure protocol (tcp_secure): {}", address.toString()); +#else + UNUSED(port); + throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.", + ErrorCodes::SUPPORT_IS_DISABLED}; +#endif }); } #else @@ -934,13 +1009,15 @@ int Server::main(const std::vector & /*args*/) else LOG_INFO(log, "Closed connections to servers for tables."); - global_context->shutdownNuKeeperStorageDispatcher(); + global_context->shutdownKeeperStorageDispatcher(); } + /// Wait server pool to avoid use-after-free of destroyed context in the handlers + server_pool.joinAll(); + /** Explicitly destroy Context. It is more convenient than in destructor of Server, because logger is still available. * At this moment, no one could own shared part of Context. 
*/ - global_context_ptr = nullptr; global_context.reset(); shared_context.reset(); LOG_DEBUG(log, "Destroyed global context."); @@ -954,14 +1031,16 @@ int Server::main(const std::vector & /*args*/) try { - loadMetadataSystem(*global_context); + loadMetadataSystem(global_context); /// After attaching system databases we can initialize system log. global_context->initializeSystemLogs(); auto & database_catalog = DatabaseCatalog::instance(); /// After the system database is created, attach virtual system tables (in addition to query_log and part_log) attachSystemTablesServer(*database_catalog.getSystemDatabase(), has_zookeeper); + /// We load temporary database first, because projections need it. + database_catalog.loadTemporaryDatabase(); /// Then, load remaining databases - loadMetadata(*global_context, default_database); + loadMetadata(global_context, default_database); database_catalog.loadDatabases(); /// After loading validate that default database exists database_catalog.assertDatabaseExists(default_database); @@ -986,7 +1065,7 @@ int Server::main(const std::vector & /*args*/) /// /// Look at compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h /// -#if USE_UNWIND && !WITH_COVERAGE && !defined(SANITIZER) +#if USE_UNWIND && !WITH_COVERAGE && !defined(SANITIZER) && defined(__x86_64__) /// Profilers cannot work reliably with any other libunwind or without PHDR cache. if (hasPHDRCache()) { @@ -1023,6 +1102,10 @@ int Server::main(const std::vector & /*args*/) " when two different stack unwinding methods will interfere with each other."); #endif +#if !defined(__x86_64__) + LOG_INFO(log, "Query Profiler is only tested on x86_64. It also known to not work under qemu-user."); +#endif + if (!hasPHDRCache()) LOG_INFO(log, "Query Profiler and TraceCollector are disabled because they require PHDR cache to be created" " (otherwise the function 'dl_iterate_phdr' is not lock free and not async-signal safe)."); @@ -1037,7 +1120,7 @@ int Server::main(const std::vector & /*args*/) else { /// Initialize a watcher periodically updating DNS cache - dns_cache_updater = std::make_unique(*global_context, config().getInt("dns_cache_update_period", 15)); + dns_cache_updater = std::make_unique(global_context, config().getInt("dns_cache_update_period", 15)); } #if defined(OS_LINUX) @@ -1069,7 +1152,7 @@ int Server::main(const std::vector & /*args*/) { /// This object will periodically calculate some metrics. 
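        /// (The update period is read from 'asynchronous_metrics_update_period_s'
        /// just below and defaults to 60 seconds.)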
AsynchronousMetrics async_metrics( - *global_context, config().getUInt("asynchronous_metrics_update_period_s", 60), servers_to_start_before_tables, servers); + global_context, config().getUInt("asynchronous_metrics_update_period_s", 60), servers_to_start_before_tables, servers); attachSystemTablesAsync(*DatabaseCatalog::instance().getSystemDatabase(), async_metrics); for (const auto & listen_host : listen_hosts) @@ -1306,18 +1389,9 @@ int Server::main(const std::vector & /*args*/) } /// try to load dictionaries immediately, throw on error and die - ext::scope_guard dictionaries_xmls, models_xmls; try { - if (!config().getBool("dictionaries_lazy_load", true)) - { - global_context->tryCreateEmbeddedDictionaries(); - global_context->getExternalDictionariesLoader().enableAlwaysLoadEverything(true); - } - dictionaries_xmls = global_context->getExternalDictionariesLoader().addConfigRepository( - std::make_unique(config(), "dictionaries_config")); - models_xmls = global_context->getExternalModelsLoader().addConfigRepository( - std::make_unique(config(), "models_config")); + global_context->loadDictionaries(config()); } catch (...) { @@ -1332,7 +1406,7 @@ int Server::main(const std::vector & /*args*/) int pool_size = config().getInt("distributed_ddl.pool_size", 1); if (pool_size < 1) throw Exception("distributed_ddl.pool_size should be greater then 0", ErrorCodes::ARGUMENT_OUT_OF_BOUND); - global_context->setDDLWorker(std::make_unique(pool_size, ddl_zookeeper_path, *global_context, &config(), + global_context->setDDLWorker(std::make_unique(pool_size, ddl_zookeeper_path, global_context, &config(), "distributed_ddl", "DDLWorker", &CurrentMetrics::MaxDDLEntryID)); } @@ -1383,7 +1457,7 @@ int Server::main(const std::vector & /*args*/) /// Dump coverage here, because std::atexit callback would not be called. 
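            /// (forceShutdown(), defined earlier in this file, replaces the old
            /// _exit(Application::EXIT_OK): under Thread Sanitizer on Linux it
            /// exits the whole thread group via SYS_exit_group instead.)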
dumpCoverageReportIfPossible(); LOG_INFO(log, "Will shutdown forcefully."); - _exit(Application::EXIT_OK); + forceShutdown(); } }); diff --git a/programs/server/Server.h b/programs/server/Server.h index fbfc26f6ee5..45e5fccd51d 100644 --- a/programs/server/Server.h +++ b/programs/server/Server.h @@ -40,9 +40,9 @@ public: return BaseDaemon::logger(); } - Context & context() const override + ContextMutablePtr context() const override { - return *global_context_ptr; + return global_context; } bool isCancelled() const override @@ -64,8 +64,7 @@ protected: std::string getDefaultCorePath() const override; private: - Context * global_context_ptr = nullptr; - + ContextMutablePtr global_context; Poco::Net::SocketAddress socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure = false) const; using CreateServerFunc = std::function; diff --git a/programs/server/config.d/keeper_port.xml b/programs/server/config.d/keeper_port.xml new file mode 120000 index 00000000000..6ebfce266fc --- /dev/null +++ b/programs/server/config.d/keeper_port.xml @@ -0,0 +1 @@ +../../../tests/config/config.d/keeper_port.xml \ No newline at end of file diff --git a/programs/server/config.d/test_keeper_port.xml b/programs/server/config.d/test_keeper_port.xml deleted file mode 120000 index f3f721caae0..00000000000 --- a/programs/server/config.d/test_keeper_port.xml +++ /dev/null @@ -1 +0,0 @@ -../../../tests/config/config.d/test_keeper_port.xml \ No newline at end of file diff --git a/programs/server/config.xml b/programs/server/config.xml index 9c01b328290..75647b10416 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -7,7 +7,20 @@ --> - + trace /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.err.log @@ -76,7 +89,7 @@ - + 9005 1000 + + 1073741824 /var/lib/clickhouse/ @@ -347,6 +362,20 @@ bind_dn - template used to construct the DN to bind to. The resulting DN will be constructed by replacing all '{user_name}' substrings of the template with the actual user name during each authentication attempt. + user_dn_detection - section with LDAP search parameters for detecting the actual user DN of the bound user. + This is mainly used in search filters for further role mapping when the server is Active Directory. The + resulting user DN will be used when replacing '{user_dn}' substrings wherever they are allowed. By default, + user DN is set equal to bind DN, but once search is performed, it will be updated with to the actual detected + user DN value. + base_dn - template used to construct the base DN for the LDAP search. + The resulting DN will be constructed by replacing all '{user_name}' and '{bind_dn}' substrings + of the template with the actual user name and bind DN during the LDAP search. + scope - scope of the LDAP search. + Accepted values are: 'base', 'one_level', 'children', 'subtree' (the default). + search_filter - template used to construct the search filter for the LDAP search. + The resulting filter will be constructed by replacing all '{user_name}', '{bind_dn}', and '{base_dn}' + substrings of the template with the actual user name, bind DN, and base DN during the LDAP search. + Note, that the special characters must be escaped properly in XML. verification_cooldown - a period of time, in seconds, after a successful bind attempt, during which a user will be assumed to be successfully authenticated for all consecutive requests without contacting the LDAP server. 
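                For example, a value of 300 would cache a successful bind for five minutes.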
Specify 0 (the default) to disable caching and force contacting the LDAP server for each authentication request. @@ -378,6 +407,17 @@ /path/to/tls_ca_cert_dir ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384 + Example (typical Active Directory with configured user DN detection for further role mapping): + + localhost + 389 + EXAMPLE\{user_name} + + CN=Users,DC=example,DC=com + (&(objectClass=user)(sAMAccountName={user_name})) + + no + --> @@ -429,15 +469,16 @@ There can be multiple 'role_mapping' sections defined inside the same 'ldap' section. All of them will be applied. base_dn - template used to construct the base DN for the LDAP search. - The resulting DN will be constructed by replacing all '{user_name}' and '{bind_dn}' substrings - of the template with the actual user name and bind DN during each LDAP search. + The resulting DN will be constructed by replacing all '{user_name}', '{bind_dn}', and '{user_dn}' + substrings of the template with the actual user name, bind DN, and user DN during each LDAP search. scope - scope of the LDAP search. Accepted values are: 'base', 'one_level', 'children', 'subtree' (the default). search_filter - template used to construct the search filter for the LDAP search. - The resulting filter will be constructed by replacing all '{user_name}', '{bind_dn}', and '{base_dn}' - substrings of the template with the actual user name, bind DN, and base DN during each LDAP search. + The resulting filter will be constructed by replacing all '{user_name}', '{bind_dn}', '{user_dn}', and + '{base_dn}' substrings of the template with the actual user name, bind DN, user DN, and base DN during + each LDAP search. Note, that the special characters must be escaped properly in XML. - attribute - attribute name whose values will be returned by the LDAP search. + attribute - attribute name whose values will be returned by the LDAP search. 'cn', by default. prefix - prefix, that will be expected to be in front of each string in the original list of strings returned by the LDAP search. Prefix will be removed from the original strings and resulting strings will be treated as local role names. Empty, by default. @@ -456,6 +497,17 @@ clickhouse_ + Example (typical Active Directory with role mapping that relies on the detected user DN): + + my_ad_server + + CN=Users,DC=example,DC=com + CN + subtree + (&(objectClass=group)(member={user_dn})) + clickhouse_ + + --> @@ -505,6 +557,33 @@ false + ' | sed -e 's|.*>\(.*\)<.*|\1|') + wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge_$PKG_VER-1_all.deb + apt install --no-install-recommends -f ./clickhouse-jdbc-bridge_$PKG_VER-1_all.deb + clickhouse-jdbc-bridge & + + * [CentOS/RHEL] + export MVN_URL=https://repo1.maven.org/maven2/ru/yandex/clickhouse/clickhouse-jdbc-bridge + export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '' | sed -e 's|.*>\(.*\)<.*|\1|') + wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm + yum localinstall -y clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm + clickhouse-jdbc-bridge & + + Please refer to https://github.com/ClickHouse/clickhouse-jdbc-bridge#usage for more information. 
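        Once the bridge is running, point ClickHouse at it via the jdbc_bridge section
        (host 127.0.0.1 and port 9019 in these examples).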
+ ]]> + + diff --git a/programs/server/config.yaml.example b/programs/server/config.yaml.example new file mode 100644 index 00000000000..bebfd74ff58 --- /dev/null +++ b/programs/server/config.yaml.example @@ -0,0 +1,950 @@ +# This is an example of a configuration file "config.xml" rewritten in YAML +# You can read this documentation for detailed information about YAML configuration: +# https://clickhouse.tech/docs/en/operations/configuration-files/ + +# NOTE: User and query level settings are set up in "users.yaml" file. +# If you have accidentally specified user-level settings here, server won't start. +# You can either move the settings to the right place inside "users.xml" file +# or add skip_check_for_incorrect_settings: 1 here. +logger: + # Possible levels [1]: + # - none (turns off logging) + # - fatal + # - critical + # - error + # - warning + # - notice + # - information + # - debug + # - trace + # [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114 + level: trace + log: /var/log/clickhouse-server/clickhouse-server.log + errorlog: /var/log/clickhouse-server/clickhouse-server.err.log + # Rotation policy + # See https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/FileChannel.h#L54-L85 + size: 1000M + count: 10 + # console: 1 + # Default behavior is autodetection (log to console if not daemon mode and is tty) + + # Per level overrides (legacy): + # For example to suppress logging of the ConfigReloader you can use: + # NOTE: levels.logger is reserved, see below. + # levels: + # ConfigReloader: none + + # Per level overrides: + # For example to suppress logging of the RBAC for default user you can use: + # (But please note that the logger name maybe changed from version to version, even after minor upgrade) + # levels: + # - logger: + # name: 'ContextAccess (default)' + # level: none + # - logger: + # name: 'DatabaseOrdinary (test)' + # level: none + +# It is the name that will be shown in the clickhouse-client. +# By default, anything with "production" will be highlighted in red in query prompt. +# display_name: production + +# Port for HTTP API. See also 'https_port' for secure connections. +# This interface is also used by ODBC and JDBC drivers (DataGrip, Dbeaver, ...) +# and by most of web interfaces (embedded UI, Grafana, Redash, ...). +http_port: 8123 + +# Port for interaction by native protocol with: +# - clickhouse-client and other native ClickHouse tools (clickhouse-benchmark, clickhouse-copier); +# - clickhouse-server with other clickhouse-servers for distributed query processing; +# - ClickHouse drivers and applications supporting native protocol +# (this protocol is also informally called as "the TCP protocol"); +# See also 'tcp_port_secure' for secure connections. +tcp_port: 9000 + +# Compatibility with MySQL protocol. +# ClickHouse will pretend to be MySQL for applications connecting to this port. +mysql_port: 9004 + +# Compatibility with PostgreSQL protocol. +# ClickHouse will pretend to be PostgreSQL for applications connecting to this port. +postgresql_port: 9005 + +# HTTP API with TLS (HTTPS). +# You have to configure certificate to enable this interface. +# See the openSSL section below. +# https_port: 8443 + +# Native interface with TLS. +# You have to configure certificate to enable this interface. +# See the openSSL section below. +# tcp_port_secure: 9440 + +# Native interface wrapped with PROXYv1 protocol +# PROXYv1 header sent for every connection. 
+# ClickHouse will extract information about proxy-forwarded client address from the header. +# tcp_with_proxy_port: 9011 + +# Port for communication between replicas. Used for data exchange. +# It provides low-level data access between servers. +# This port should not be accessible from untrusted networks. +# See also 'interserver_http_credentials'. +# Data transferred over connections to this port should not go through untrusted networks. +# See also 'interserver_https_port'. +interserver_http_port: 9009 + +# Port for communication between replicas with TLS. +# You have to configure certificate to enable this interface. +# See the openSSL section below. +# See also 'interserver_http_credentials'. +# interserver_https_port: 9010 + +# Hostname that is used by other replicas to request this server. +# If not specified, than it is determined analogous to 'hostname -f' command. +# This setting could be used to switch replication to another network interface +# (the server may be connected to multiple networks via multiple addresses) +# interserver_http_host: example.yandex.ru + +# You can specify credentials for authenthication between replicas. +# This is required when interserver_https_port is accessible from untrusted networks, +# and also recommended to avoid SSRF attacks from possibly compromised services in your network. +# interserver_http_credentials: +# user: interserver +# password: '' + +# Listen specified address. +# Use :: (wildcard IPv6 address), if you want to accept connections both with IPv4 and IPv6 from everywhere. +# Notes: +# If you open connections from wildcard address, make sure that at least one of the following measures applied: +# - server is protected by firewall and not accessible from untrusted networks; +# - all users are restricted to subset of network addresses (see users.xml); +# - all users have strong passwords, only secure (TLS) interfaces are accessible, or connections are only made via TLS interfaces. +# - users without password have readonly access. +# See also: https://www.shodan.io/search?query=clickhouse +# listen_host: '::' + +# Same for hosts without support for IPv6: +# listen_host: 0.0.0.0 + +# Default values - try listen localhost on IPv4 and IPv6. +# listen_host: '::1' +# listen_host: 127.0.0.1 + +# Don't exit if IPv6 or IPv4 networks are unavailable while trying to listen. +# listen_try: 0 + +# Allow multiple servers to listen on the same address:port. This is not recommended. +# listen_reuse_port: 0 + +# listen_backlog: 64 +max_connections: 4096 + +# For 'Connection: keep-alive' in HTTP 1.1 +keep_alive_timeout: 3 + +# gRPC protocol (see src/Server/grpc_protos/clickhouse_grpc.proto for the API) +# grpc_port: 9100 +grpc: + enable_ssl: false + + # The following two files are used only if enable_ssl=1 + ssl_cert_file: /path/to/ssl_cert_file + ssl_key_file: /path/to/ssl_key_file + + # Whether server will request client for a certificate + ssl_require_client_auth: false + + # The following file is used only if ssl_require_client_auth=1 + ssl_ca_cert_file: /path/to/ssl_ca_cert_file + + # Default compression algorithm (applied if client doesn't specify another algorithm). + # Supported algorithms: none, deflate, gzip, stream_gzip + compression: deflate + + # Default compression level (applied if client doesn't specify another level). + # Supported levels: none, low, medium, high + compression_level: medium + + # Send/receive message size limits in bytes. 
-1 means unlimited + max_send_message_size: -1 + max_receive_message_size: -1 + + # Enable if you want very detailed logs + verbose_logs: false + +# Used with https_port and tcp_port_secure. Full ssl options list: https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 +openSSL: + server: + # Used for https server AND secure tcp port + # openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt + certificateFile: /etc/clickhouse-server/server.crt + privateKeyFile: /etc/clickhouse-server/server.key + + # dhparams are optional. You can delete the dhParamsFile: element. + # To generate dhparams, use the following command: + # openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096 + # Only file format with BEGIN DH PARAMETERS is supported. + dhParamsFile: /etc/clickhouse-server/dhparam.pem + verificationMode: none + loadDefaultCAFile: true + cacheSessions: true + disableProtocols: 'sslv2,sslv3' + preferServerCiphers: true + client: + # Used for connecting to https dictionary source and secured Zookeeper communication + loadDefaultCAFile: true + cacheSessions: true + disableProtocols: 'sslv2,sslv3' + preferServerCiphers: true + + # Use for self-signed: verificationMode: none + invalidCertificateHandler: + # Use for self-signed: name: AcceptCertificateHandler + name: RejectCertificateHandler + +# Default root page on http[s] server. For example load UI from https://tabix.io/ when opening http://localhost:8123 +# http_server_default_response: |- +#
+ +# Maximum number of concurrent queries. +max_concurrent_queries: 100 + +# Maximum memory usage (resident set size) for server process. +# Zero value or unset means default. Default is "max_server_memory_usage_to_ram_ratio" of available physical RAM. +# If the value is larger than "max_server_memory_usage_to_ram_ratio" of available physical RAM, it will be cut down. + +# The constraint is checked on query execution time. +# If a query tries to allocate memory and the current memory usage plus allocation is greater +# than specified threshold, exception will be thrown. + +# It is not practical to set this constraint to small values like just a few gigabytes, +# because memory allocator will keep this amount of memory in caches and the server will deny service of queries. +max_server_memory_usage: 0 + +# Maximum number of threads in the Global thread pool. +# This will default to a maximum of 10000 threads if not specified. +# This setting will be useful in scenarios where there are a large number +# of distributed queries that are running concurrently but are idling most +# of the time, in which case a higher number of threads might be required. +max_thread_pool_size: 10000 + +# On memory constrained environments you may have to set this to value larger than 1. +max_server_memory_usage_to_ram_ratio: 0.9 + +# Simple server-wide memory profiler. Collect a stack trace at every peak allocation step (in bytes). +# Data will be stored in system.trace_log table with query_id = empty string. +# Zero means disabled. +total_memory_profiler_step: 4194304 + +# Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type. +# The probability is for every alloc/free regardless to the size of the allocation. +# Note that sampling happens only when the amount of untracked memory exceeds the untracked memory limit, +# which is 4 MiB by default but can be lowered if 'total_memory_profiler_step' is lowered. +# You may want to set 'total_memory_profiler_step' to 1 for extra fine grained sampling. +total_memory_tracker_sample_probability: 0 + +# Set limit on number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve +# correct maximum value. +# max_open_files: 262144 + +# Size of cache of uncompressed blocks of data, used in tables of MergeTree family. +# In bytes. Cache is single for server. Memory is allocated only on demand. +# Cache is used when 'use_uncompressed_cache' user setting turned on (off by default). +# Uncompressed cache is advantageous only for very short queries and in rare cases. + +# Note: uncompressed cache can be pointless for lz4, because memory bandwidth +# is slower than multi-core decompression on some server configurations. +# Enabling it can sometimes paradoxically make queries slower. +uncompressed_cache_size: 8589934592 + +# Approximate size of mark cache, used in tables of MergeTree family. +# In bytes. Cache is single for server. Memory is allocated only on demand. +# You should not lower this value. +mark_cache_size: 5368709120 + +# If you enable the `min_bytes_to_use_mmap_io` setting, +# the data in MergeTree tables can be read with mmap to avoid copying from kernel to userspace. +# It makes sense only for large files and helps only if data reside in page cache. +# To avoid frequent open/mmap/munmap/close calls (which are very expensive due to consequent page faults) +# and to reuse mappings from several threads and queries, +# the cache of mapped files is maintained. 
Its size is the number of mapped regions (usually equal to the number of mapped files). +# The amount of data in mapped files can be monitored +# in system.metrics, system.metric_log by the MMappedFiles, MMappedFileBytes metrics +# and in system.asynchronous_metrics, system.asynchronous_metrics_log by the MMapCacheCells metric, +# and also in system.events, system.processes, system.query_log, system.query_thread_log by the +# CreatedReadBufferMMap, CreatedReadBufferMMapFailed, MMappedFileCacheHits, MMappedFileCacheMisses events. +# Note that the amount of data in mapped files does not consume memory directly and is not accounted +# in query or server memory usage - because this memory can be discarded similar to OS page cache. +# The cache is dropped (the files are closed) automatically on removal of old parts in MergeTree, +# also it can be dropped manually by the SYSTEM DROP MMAP CACHE query. +mmap_cache_size: 1000 + +# Cache size for compiled expressions. +compiled_expression_cache_size: 1073741824 + +# Path to data directory, with trailing slash. +path: /var/lib/clickhouse/ + +# Path to temporary data for processing hard queries. +tmp_path: /var/lib/clickhouse/tmp/ + +# Policy from the for the temporary files. +# If not set is used, otherwise is ignored. + +# Notes: +# - move_factor is ignored +# - keep_free_space_bytes is ignored +# - max_data_part_size_bytes is ignored +# - you must have exactly one volume in that policy +# tmp_policy: tmp + +# Directory with user provided files that are accessible by 'file' table function. +user_files_path: /var/lib/clickhouse/user_files/ + +# LDAP server definitions. +ldap_servers: '' + +# List LDAP servers with their connection parameters here to later 1) use them as authenticators for dedicated local users, +# who have 'ldap' authentication mechanism specified instead of 'password', or to 2) use them as remote user directories. +# Parameters: +# host - LDAP server hostname or IP, this parameter is mandatory and cannot be empty. +# port - LDAP server port, default is 636 if enable_tls is set to true, 389 otherwise. +# bind_dn - template used to construct the DN to bind to. +# The resulting DN will be constructed by replacing all '{user_name}' substrings of the template with the actual +# user name during each authentication attempt. +# user_dn_detection - section with LDAP search parameters for detecting the actual user DN of the bound user. +# This is mainly used in search filters for further role mapping when the server is Active Directory. The +# resulting user DN will be used when replacing '{user_dn}' substrings wherever they are allowed. By default, +# user DN is set equal to bind DN, but once search is performed, it will be updated with to the actual detected +# user DN value. +# base_dn - template used to construct the base DN for the LDAP search. +# The resulting DN will be constructed by replacing all '{user_name}' and '{bind_dn}' substrings +# of the template with the actual user name and bind DN during the LDAP search. +# scope - scope of the LDAP search. +# Accepted values are: 'base', 'one_level', 'children', 'subtree' (the default). +# search_filter - template used to construct the search filter for the LDAP search. +# The resulting filter will be constructed by replacing all '{user_name}', '{bind_dn}', and '{base_dn}' +# substrings of the template with the actual user name, bind DN, and base DN during the LDAP search. +# Note, that the special characters must be escaped properly in XML. 
+# verification_cooldown - a period of time, in seconds, after a successful bind attempt, during which a user will be assumed +# to be successfully authenticated for all consecutive requests without contacting the LDAP server. +# Specify 0 (the default) to disable caching and force contacting the LDAP server for each authentication request. +# enable_tls - flag to trigger use of secure connection to the LDAP server. +# Specify 'no' for plain text (ldap://) protocol (not recommended). +# Specify 'yes' for LDAP over SSL/TLS (ldaps://) protocol (recommended, the default). +# Specify 'starttls' for legacy StartTLS protocol (plain text (ldap://) protocol, upgraded to TLS). +# tls_minimum_protocol_version - the minimum protocol version of SSL/TLS. +# Accepted values are: 'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2' (the default). +# tls_require_cert - SSL/TLS peer certificate verification behavior. +# Accepted values are: 'never', 'allow', 'try', 'demand' (the default). +# tls_cert_file - path to certificate file. +# tls_key_file - path to certificate key file. +# tls_ca_cert_file - path to CA certificate file. +# tls_ca_cert_dir - path to the directory containing CA certificates. +# tls_cipher_suite - allowed cipher suite (in OpenSSL notation). +# Example: +# my_ldap_server: +# host: localhost +# port: 636 +# bind_dn: 'uid={user_name},ou=users,dc=example,dc=com' +# verification_cooldown: 300 +# enable_tls: yes +# tls_minimum_protocol_version: tls1.2 +# tls_require_cert: demand +# tls_cert_file: /path/to/tls_cert_file +# tls_key_file: /path/to/tls_key_file +# tls_ca_cert_file: /path/to/tls_ca_cert_file +# tls_ca_cert_dir: /path/to/tls_ca_cert_dir +# tls_cipher_suite: ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384 + +# Example (typical Active Directory with configured user DN detection for further role mapping): +# my_ad_server: +# host: localhost +# port: 389 +# bind_dn: 'EXAMPLE\{user_name}' +# user_dn_detection: +# base_dn: CN=Users,DC=example,DC=com +# search_filter: '(&(objectClass=user)(sAMAccountName={user_name}))' +# enable_tls: no + +# To enable Kerberos authentication support for HTTP requests (GSS-SPNEGO), for those users who are explicitly configured +# to authenticate via Kerberos, define a single 'kerberos' section here. +# Parameters: +# principal - canonical service principal name, that will be acquired and used when accepting security contexts. +# This parameter is optional, if omitted, the default principal will be used. +# This parameter cannot be specified together with 'realm' parameter. +# realm - a realm, that will be used to restrict authentication to only those requests whose initiator's realm matches it. +# This parameter is optional, if omitted, no additional filtering by realm will be applied. +# This parameter cannot be specified together with 'principal' parameter. +# Example: +# kerberos: '' + +# Example: +# kerberos: +# principal: HTTP/clickhouse.example.com@EXAMPLE.COM + +# Example: +# kerberos: +# realm: EXAMPLE.COM + +# Sources to read users, roles, access rights, profiles of settings, quotas. +user_directories: + users_xml: + # Path to configuration file with predefined users. + path: users.yaml + local_directory: + # Path to folder where users created by SQL commands are stored. 
+# Sources to read users, roles, access rights, profiles of settings, quotas.
+user_directories:
+    users_xml:
+        # Path to configuration file with predefined users.
+        path: users.yaml
+    local_directory:
+        # Path to folder where users created by SQL commands are stored.
+        path: /var/lib/clickhouse/access/
+
+# # To add an LDAP server as a remote user directory of users that are not defined locally, define a single 'ldap' section
+# # with the following parameters:
+# #     server - one of LDAP server names defined in 'ldap_servers' config section above.
+# #         This parameter is mandatory and cannot be empty.
+# #     roles - section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server.
+# #         If no roles are specified here or assigned during role mapping (below), the user will not be able to perform any
+# #         actions after authentication.
+# #     role_mapping - section with LDAP search parameters and mapping rules.
+# #         When a user authenticates, while still bound to LDAP, an LDAP search is performed using search_filter and the
+# #         name of the logged-in user. For each entry found during that search, the value of the specified attribute is
+# #         extracted. For each attribute value that has the specified prefix, the prefix is removed, and the rest of the
+# #         value becomes the name of a local role defined in ClickHouse, which is expected to be created beforehand by
+# #         the CREATE ROLE command.
+# #         There can be multiple 'role_mapping' sections defined inside the same 'ldap' section. All of them will be
+# #         applied.
+# #         base_dn - template used to construct the base DN for the LDAP search.
+# #             The resulting DN will be constructed by replacing all '{user_name}', '{bind_dn}', and '{user_dn}'
+# #             substrings of the template with the actual user name, bind DN, and user DN during each LDAP search.
+# #         scope - scope of the LDAP search.
+# #             Accepted values are: 'base', 'one_level', 'children', 'subtree' (the default).
+# #         search_filter - template used to construct the search filter for the LDAP search.
+# #             The resulting filter will be constructed by replacing all '{user_name}', '{bind_dn}', '{user_dn}', and
+# #             '{base_dn}' substrings of the template with the actual user name, bind DN, user DN, and base DN during
+# #             each LDAP search.
+# #             Note that special characters must be escaped properly in XML.
+# #         attribute - attribute name whose values will be returned by the LDAP search. 'cn', by default.
+# #         prefix - prefix that will be expected to be in front of each string in the original list of strings returned by
+# #             the LDAP search. The prefix will be removed from the original strings, and the resulting strings will be treated
+# #             as local role names. Empty, by default.
+# # Example:
+# # ldap:
+# #     server: my_ldap_server
+# #     roles:
+# #         my_local_role1: ''
+# #         my_local_role2: ''
+# #     role_mapping:
+# #         base_dn: 'ou=groups,dc=example,dc=com'
+# #         scope: subtree
+# #         search_filter: '(&(objectClass=groupOfNames)(member={bind_dn}))'
+# #         attribute: cn
+# #         prefix: clickhouse_
+# # Example (typical Active Directory with role mapping that relies on the detected user DN):
+# # ldap:
+# #     server: my_ad_server
+# #     role_mapping:
+# #         base_dn: 'CN=Users,DC=example,DC=com'
+# #         attribute: CN
+# #         scope: subtree
+# #         search_filter: '(&(objectClass=group)(member={user_dn}))'
+# #         prefix: clickhouse_
+
+# Default profile of settings.
+default_profile: default
+
+# Comma-separated list of prefixes for user-defined settings.
+# custom_settings_prefixes: ''
+
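+# For illustration only, a sketch: with a prefix registered below, clients can
+# pass their own settings through and read them back, e.g.
+#     SET custom_static_key = 'value';
+#     SELECT getSetting('custom_static_key');
+# custom_settings_prefixes: 'custom_'
+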
+# System profile of settings. These settings are used by internal processes (Distributed DDL worker and so on).
+# system_profile: default
+
+# Buffer profile of settings.
+# These settings are used by the Buffer storage engine to flush data to the underlying table.
+# Default: taken from the system_profile directive.
+# buffer_profile: default
+
+# Default database.
+default_database: default
+
+# The server time zone can be set here.
+
+# Time zone is used when converting between String and DateTime types,
+# when printing DateTime in text formats and parsing DateTime from text;
+# it is used in date and time related functions, if a specific time zone was not passed as an argument.
+
+# Time zone is specified as an identifier from the IANA time zone database, like UTC or Africa/Abidjan.
+# If not specified, the system time zone at server startup is used.
+
+# Please note that the server could display a time zone alias instead of the specified name.
+# Example: W-SU is an alias for Europe/Moscow and Zulu is an alias for UTC.
+# timezone: Europe/Moscow
+
+# You can specify umask here (see "man umask"). The server will apply it on startup.
+# The number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
+# umask: 022
+
+# Perform mlockall after startup to lower first queries latency
+# and to prevent the clickhouse executable from being paged out under high IO load.
+# Enabling this option is recommended but will lead to increased startup time for up to a few seconds.
+mlock_executable: true
+
+# Reallocate memory for machine code ("text") using huge pages. Highly experimental.
+remap_executable: false
+
+# Uncomment below in order to use JDBC table engine and function.
+# To install and run JDBC bridge in background:
+# * [Debian/Ubuntu]
+#   export MVN_URL=https://repo1.maven.org/maven2/ru/yandex/clickhouse/clickhouse-jdbc-bridge
+#   export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '<release>' | sed -e 's|.*>\(.*\)<.*|\1|')
+#   wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
+#   apt install --no-install-recommends -f ./clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
+#   clickhouse-jdbc-bridge &
+# * [CentOS/RHEL]
+#   export MVN_URL=https://repo1.maven.org/maven2/ru/yandex/clickhouse/clickhouse-jdbc-bridge
+#   export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '<release>' | sed -e 's|.*>\(.*\)<.*|\1|')
+#   wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
+#   yum localinstall -y clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
+#   clickhouse-jdbc-bridge &
+# Please refer to https://github.com/ClickHouse/clickhouse-jdbc-bridge#usage for more information.
+
+# jdbc_bridge:
+#     host: 127.0.0.1
+#     port: 9019
+
+# Configuration of clusters that could be used in Distributed tables.
+# https://clickhouse.tech/docs/en/operations/table_engines/distributed/
+remote_servers:
+    # Test only shard config for testing distributed storage
+    test_shard_localhost:
+        # Inter-server per-cluster secret for Distributed queries
+        # default: no secret (no authentication will be performed)
+
+        # If set, then Distributed queries will be validated on shards, so at least:
+        # - such cluster should exist on the shard,
+        # - such cluster should have the same secret.
+
+        # And also (and which is more important), the initial_user will
+        # be used as the current user for the query.
+
+        # Right now the protocol is pretty simple and it only takes into account:
+        # - cluster name
+        # - query
+
+        # It would also be nice if the following were implemented:
+        # - source hostname (see interserver_http_host), but then it would depend on DNS;
+        #   it could use the IP address instead, but then you need to get it correct on the initiator node.
+        # - target hostname / IP address (same notes as for the source hostname)
+        # - time-based security tokens
+        # secret: ''
+        shard:
+            # Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas).
+            # internal_replication: false
+            # Optional. Shard weight when writing data. Default: 1.
+            # weight: 1
+            replica:
+                host: localhost
+                port: 9000
+                # Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority).
+                # priority: 1
+    test_cluster_two_shards_localhost:
+        shard:
+            - replica:
+                  host: localhost
+                  port: 9000
+            - replica:
+                  host: localhost
+                  port: 9000
+    test_cluster_two_shards:
+        shard:
+            - replica:
+                  host: 127.0.0.1
+                  port: 9000
+            - replica:
+                  host: 127.0.0.2
+                  port: 9000
+    test_cluster_two_shards_internal_replication:
+        shard:
+            - internal_replication: true
+              replica:
+                  host: 127.0.0.1
+                  port: 9000
+            - internal_replication: true
+              replica:
+                  host: 127.0.0.2
+                  port: 9000
+    test_shard_localhost_secure:
+        shard:
+            replica:
+                host: localhost
+                port: 9440
+                secure: 1
+    test_unavailable_shard:
+        shard:
+            - replica:
+                  host: localhost
+                  port: 9000
+            - replica:
+                  host: localhost
+                  port: 1
+
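+# For illustration only: once a cluster is defined here, it can be referenced
+# from a Distributed table. A sketch, assuming a local table default.hits
+# already exists on every shard:
+#     CREATE TABLE default.hits_all AS default.hits
+#     ENGINE = Distributed(test_cluster_two_shards, default, hits, rand());
+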
+# The list of hosts allowed to be used in URL-related storage engines and table functions.
+# If this section is not present in the configuration, all hosts are allowed.
+# remote_url_allow_hosts:
+
+# The host should be specified exactly as in the URL. The name is checked before DNS resolution.
+# Example: "yandex.ru", "yandex.ru." and "www.yandex.ru" are different hosts.
+# If the port is explicitly specified in the URL, the host:port is checked as a whole.
+# If the host is specified here without a port, any port for this host is allowed.
+# "yandex.ru" -> "yandex.ru:443", "yandex.ru:80" etc. are allowed, but "yandex.ru:80" -> only "yandex.ru:80" is allowed.
+# If the host is specified as an IP address, it is checked as specified in the URL. Example: "[2a02:6b8:a::a]".
+# If there are redirects and support for redirects is enabled, every redirect (the Location field) is checked.
+
+# A regular expression can be specified. The RE2 engine is used for regexps.
+# Regexps are not anchored: don't forget to add ^ and $. Also don't forget to escape the dot (.) metacharacter
+# (forgetting to do so is a common source of error).
+
+# If an element has the 'incl' attribute, the corresponding substitution from another file will be used as its value.
+# By default, the path to the file with substitutions is /etc/metrika.xml. It can be changed in the config in the 'include_from' element.
+# Values for substitutions are specified in /yandex/name_of_substitution elements in that file.
+
+# ZooKeeper is used to store metadata about replicas, when using Replicated tables.
+# Optional. If you don't use replicated tables, you can omit this.
+# See https://clickhouse.tech/docs/en/engines/table-engines/mergetree-family/replication/
+
+# zookeeper:
+#     - node:
+#         host: example1
+#         port: 2181
+#     - node:
+#         host: example2
+#         port: 2181
+#     - node:
+#         host: example3
+#         port: 2181
+
+# Substitutions for parameters of replicated tables.
+# Optional. If you don't use replicated tables, you can omit this.
+# See https://clickhouse.tech/docs/en/engines/table-engines/mergetree-family/replication/#creating-replicated-tables
+# macros:
+#     shard: 01
+#     replica: example01-01-1
+
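+# For illustration only: the macros above are substituted into ReplicatedMergeTree
+# arguments, so all replicas can share one CREATE statement. A sketch:
+#     CREATE TABLE default.events (d Date, x UInt64)
+#     ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/events', '{replica}')
+#     ORDER BY x;
+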
+# Reloading interval for embedded dictionaries, in seconds. Default: 3600.
+builtin_dictionaries_reload_interval: 3600
+
+# Maximum session timeout, in seconds. Default: 3600.
+max_session_timeout: 3600
+
+# Default session timeout, in seconds. Default: 60.
+default_session_timeout: 60
+
+# Sending data to Graphite for monitoring. Several sections can be defined.
+# interval - send every X seconds
+# root_path - prefix for keys
+# hostname_in_path - append hostname to root_path (default = true)
+# metrics - send data from table system.metrics
+# events - send data from table system.events
+# asynchronous_metrics - send data from table system.asynchronous_metrics
+
+# graphite:
+#     host: localhost
+#     port: 42000
+#     timeout: 0.1
+#     interval: 60
+#     root_path: one_min
+#     hostname_in_path: true
+
+#     metrics: true
+#     events: true
+#     events_cumulative: false
+#     asynchronous_metrics: true
+
+# graphite:
+#     host: localhost
+#     port: 42000
+#     timeout: 0.1
+#     interval: 1
+#     root_path: one_sec
+
+#     metrics: true
+#     events: true
+#     events_cumulative: false
+#     asynchronous_metrics: false
+
+# Serve an endpoint for Prometheus monitoring.
+# endpoint - metrics path (relative to root, starting with "/")
+# port - port to set up the server on. If not defined or 0, then http_port is used
+# metrics - send data from table system.metrics
+# events - send data from table system.events
+# asynchronous_metrics - send data from table system.asynchronous_metrics
+# status_info - send status data from different ClickHouse components, e.g. dictionaries status
+
+# prometheus:
+#     endpoint: /metrics
+#     port: 9363
+
+#     metrics: true
+#     events: true
+#     asynchronous_metrics: true
+#     status_info: true
+
+# Query log. Used only for queries with setting log_queries = 1.
+query_log:
+    # What table to insert data into. If the table does not exist, it will be created.
+    # When the query log structure is changed after a system update,
+    # the old table will be renamed and a new table will be created automatically.
+    database: system
+    table: query_log
+
+    # PARTITION BY expr: https://clickhouse.yandex/docs/en/table_engines/mergetree-family/custom_partitioning_key/
+    # Example:
+    #     event_date
+    #     toMonday(event_date)
+    #     toYYYYMM(event_date)
+    #     toStartOfHour(event_time)
+    partition_by: toYYYYMM(event_date)
+
+    # Table TTL specification: https://clickhouse.tech/docs/en/engines/table-engines/mergetree-family/mergetree/#mergetree-table-ttl
+    # Example:
+    #     event_date + INTERVAL 1 WEEK
+    #     event_date + INTERVAL 7 DAY DELETE
+    #     event_date + INTERVAL 2 WEEK TO DISK 'bbb'
+
+    # ttl: 'event_date + INTERVAL 30 DAY DELETE'
+
+    # Instead of partition_by, you can provide a full engine expression (starting with ENGINE = ) with parameters.
+    # Example: engine: 'ENGINE = MergeTree PARTITION BY toYYYYMM(event_date) ORDER BY (event_date, event_time) SETTINGS index_granularity = 1024'
+
+    # Interval of flushing data.
+    flush_interval_milliseconds: 7500
+
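+# For illustration only: with log_queries = 1, recently finished queries can be
+# inspected with a query along these lines:
+#     SELECT event_time, query_duration_ms, query
+#     FROM system.query_log
+#     WHERE type = 'QueryFinish'
+#     ORDER BY event_time DESC
+#     LIMIT 10;
+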
+# Trace log. Stores stack traces collected by query profilers.
+# See query_profiler_real_time_period_ns and query_profiler_cpu_time_period_ns settings.
+trace_log:
+    database: system
+    table: trace_log
+    partition_by: toYYYYMM(event_date)
+    flush_interval_milliseconds: 7500
+
+# Query thread log. Has information about all threads that participated in query execution.
+# Used only for queries with setting log_query_threads = 1.
+query_thread_log:
+    database: system
+    table: query_thread_log
+    partition_by: toYYYYMM(event_date)
+    flush_interval_milliseconds: 7500
+
+# Uncomment to use the part log.
+# The part log contains information about all actions with parts in MergeTree tables (creation, deletion, merges, downloads).
+# part_log:
+#     database: system
+#     table: part_log
+#     flush_interval_milliseconds: 7500
+
+# Uncomment to write the text log into a table.
+# The text log contains all information from the usual server log but stores it in a structured and efficient way.
+# The level of the messages that go to the table can be limited with the 'level' setting; if not specified, all messages will go to the table.
+# text_log:
+#     database: system
+#     table: text_log
+#     flush_interval_milliseconds: 7500
+#     level: ''
+
+# Metric log contains rows with current values of ProfileEvents, CurrentMetrics collected with "collect_interval_milliseconds" interval.
+metric_log:
+    database: system
+    table: metric_log
+    flush_interval_milliseconds: 7500
+    collect_interval_milliseconds: 1000
+
+# Asynchronous metric log contains values of metrics from
+# system.asynchronous_metrics.
+asynchronous_metric_log:
+    database: system
+    table: asynchronous_metric_log
+
+    # Asynchronous metrics are updated once a minute, so there is
+    # no need to flush more often.
+    flush_interval_milliseconds: 60000
+
+# OpenTelemetry log contains OpenTelemetry trace spans.
+opentelemetry_span_log:
+
+    # The default table creation code is insufficient, this spec
+    # is a workaround. There is no 'event_time' for this log, but two times,
+    # start and finish. It is sorted by finish time, to avoid inserting
+    # data too far away in the past (probably we can sometimes insert a span
+    # that is seconds earlier than the last span in the table, due to a race
+    # between several spans inserted in parallel). This gives the spans a
+    # global order that we can use to e.g. retry insertion into some external
+    # system.
+    engine: |-
+        engine MergeTree
+        partition by toYYYYMM(finish_date)
+        order by (finish_date, finish_time_us, trace_id)
+    database: system
+    table: opentelemetry_span_log
+    flush_interval_milliseconds: 7500
+
+# Crash log. Stores stack traces for fatal errors.
+# This table is normally empty.
+crash_log:
+    database: system
+    table: crash_log
+    partition_by: ''
+    flush_interval_milliseconds: 1000
+
+# Parameters for embedded dictionaries, used in Yandex.Metrica.
+# See https://clickhouse.yandex/docs/en/dicts/internal_dicts/
+
+# Path to file with region hierarchy.
+# path_to_regions_hierarchy_file: /opt/geo/regions_hierarchy.txt
+
+# Path to directory with files containing names of regions
+# path_to_regions_names_files: /opt/geo/
+
+
+# top_level_domains_path: /var/lib/clickhouse/top_level_domains/
+# Custom TLD lists.
+# Format: name: /path/to/file
+
+# Changes will not be applied without a server restart.
+# The path to each list is under top_level_domains_path (see above).
+top_level_domains_lists: ''
+
+# public_suffix_list: /path/to/public_suffix_list.dat
+
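+# For illustration only: a list registered under the name 'public_suffix_list'
+# could then be used from SQL with the custom TLD functions, e.g.:
+#     SELECT cutToFirstSignificantSubdomainCustom('https://news.example.com.cy/', 'public_suffix_list');
+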
+# Configuration of external dictionaries. See:
+# https://clickhouse.tech/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts
+dictionaries_config: '*_dictionary.xml'
+
+# Uncomment if you want data to be compressed 30-100% better.
+# Don't do that if you just started using ClickHouse.
+
+# compression:
+#     # Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used.
+#     case:
+#         # Conditions. All must be satisfied. Some conditions may be omitted.
+#         # min_part_size: 10000000000        # Min part size in bytes.
+#         # min_part_size_ratio: 0.01         # Min size of part relative to whole table size.
+
+#         # What compression method to use.
+#         method: zstd
+
+# Allow to execute distributed DDL queries (CREATE, DROP, ALTER, RENAME) on a cluster.
+# Works only if ZooKeeper is enabled. Comment it out if such functionality isn't required.
+distributed_ddl:
+    # Path in ZooKeeper to the queue with DDL queries
+    path: /clickhouse/task_queue/ddl
+
+    # Settings from this profile will be used to execute DDL queries
+    # profile: default
+
+    # Controls how many ON CLUSTER queries can be run simultaneously.
+    # pool_size: 1
+
+    # Cleanup settings (active tasks will not be removed)
+
+    # Controls task TTL (default 1 week)
+    # task_max_lifetime: 604800
+
+    # Controls how often cleanup should be performed (in seconds)
+    # cleanup_delay_period: 60
+
+    # Controls how many tasks could be in the queue
+    # max_tasks_in_queue: 1000
+
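+# For illustration only: with this section enabled, a single DDL statement can be
+# executed on every host of a cluster, e.g.:
+#     CREATE TABLE default.t ON CLUSTER test_cluster_two_shards
+#     (x UInt64) ENGINE = MergeTree ORDER BY x;
+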
+# Settings to fine-tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h
+# merge_tree:
+#     max_suspicious_broken_parts: 5
+
+# Protection from accidental DROP.
+# If the size of a MergeTree table is greater than max_table_size_to_drop (in bytes), then the table cannot be dropped with any DROP query.
+# If you want to delete one table and don't want to change the clickhouse-server config, you can create the special file /flags/force_drop_table and make the DROP once.
+# By default max_table_size_to_drop is 50GB; max_table_size_to_drop=0 allows dropping any table.
+# The same applies to max_partition_size_to_drop.
+# Uncomment to disable protection.
+
+# max_table_size_to_drop: 0
+# max_partition_size_to_drop: 0
+
+# Example of parameters for the GraphiteMergeTree table engine
+graphite_rollup_example:
+    pattern:
+        regexp: click_cost
+        function: any
+        retention:
+            - age: 0
+              precision: 3600
+            - age: 86400
+              precision: 60
+    default:
+        function: max
+        retention:
+            - age: 0
+              precision: 60
+            - age: 3600
+              precision: 300
+            - age: 86400
+              precision: 3600
+
+# Directory containing schema files for various input formats.
+# The directory will be created if it doesn't exist.
+format_schema_path: /var/lib/clickhouse/format_schemas/
+
+# Default query masking rules; matching substrings will be replaced with something else in the logs
+# (both text logs and system.query_log).
+# name - name for the rule (optional)
+# regexp - RE2 compatible regular expression (mandatory)
+# replace - substitution string for sensitive data (optional, by default - six asterisks)
+query_masking_rules:
+    rule:
+        name: hide encrypt/decrypt arguments
+        regexp: '((?:aes_)?(?:encrypt|decrypt)(?:_mysql)?)\s*\(\s*(?:''(?:\\''|.)+''|.*?)\s*\)'
+        # or more secure, but also more invasive:
+        # (aes_\w+)\s*\(.*\)
+        replace: \1(???)
+
+# Uncomment to use custom http handlers.
+# Rules are checked from top to bottom; the first match runs the handler.
+#     url - to match the request URL, you can use the 'regex:' prefix for regex matching (optional)
+#     methods - to match the request method, you can use commas to separate multiple method matches (optional)
+#     headers - to match request headers, match each child element (the child element name is the header name), you can use the 'regex:' prefix for regex matching (optional)
+# handler is the request handler
+#     type - supported types: static, dynamic_query_handler, predefined_query_handler
+#     query - use with predefined_query_handler type, executes the query when the handler is called
+#     query_param_name - use with dynamic_query_handler type, the name of the HTTP request parameter whose value will be extracted and executed
+#     status - use with static type, response status code
+#     content_type - use with static type, response content-type
+#     response_content - use with static type, response content sent to the client; when using the 'file://' or 'config://' prefix, the content is taken from the file or from the configuration and sent to the client.
+
+# http_handlers:
+#     - rule:
+#         url: /
+#         methods: POST,GET
+#         headers:
+#             pragma: no-cache
+#         handler:
+#             type: dynamic_query_handler
+#             query_param_name: query
+#     - rule:
+#         url: /predefined_query
+#         methods: POST,GET
+#         handler:
+#             type: predefined_query_handler
+#             query: 'SELECT * FROM system.settings'
+#     - rule:
+#         handler:
+#             type: static
+#             status: 200
+#             content_type: 'text/plain; charset=UTF-8'
+#             response_content: config://http_server_default_response
+
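+# For illustration only, assuming the handlers sketched above are enabled and the
+# default http_port 8123 is used, the dynamic handler could be exercised like this:
+#     curl -H 'Pragma: no-cache' 'http://localhost:8123/?query=SELECT%201'
+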
+send_crash_reports:
+    # Changing to true allows sending crash reports to
+    # the ClickHouse core developers team via Sentry https://sentry.io
+    # Doing so at least in pre-production environments is highly appreciated
+    enabled: false
+    # Change to true if you don't feel comfortable attaching the server hostname to the crash report
+    anonymize: false
+    # The default endpoint should be changed to a different Sentry DSN only if you have
+    # some in-house engineers or hired consultants who are going to debug ClickHouse issues for you
+    endpoint: 'https://6f33034cfe684dd7a3ab9875e57b1c8d@o388870.ingest.sentry.io/5226277'
+    # Uncomment to disable ClickHouse internal DNS caching.
+    # disable_internal_dns_cache: 1
diff --git a/programs/server/data/.gitignore b/programs/server/data/.gitignore
deleted file mode 100644
index b9719d9d1d1..00000000000
--- a/programs/server/data/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-*.txt
-*.dat
-*.idx
diff --git a/programs/server/metadata/.gitignore b/programs/server/metadata/.gitignore
deleted file mode 100644
index d1b811b7de5..00000000000
--- a/programs/server/metadata/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*.sql
diff --git a/programs/server/users.yaml.example b/programs/server/users.yaml.example
new file mode 100644
index 00000000000..76aee04c19b
--- /dev/null
+++ b/programs/server/users.yaml.example
@@ -0,0 +1,107 @@
+# Profiles of settings.
+profiles:
+    # Default settings.
+    default:
+        # Maximum memory usage for processing a single query, in bytes.
+        max_memory_usage: 10000000000
+
+        # How to choose between replicas during distributed query processing.
+        # random - choose a random replica from the set of replicas with the minimum number of errors
+        # nearest_hostname - from the set of replicas with the minimum number of errors, choose the replica
+        #   with the minimum number of different symbols between the replica's hostname and the local hostname (Hamming distance).
+        # in_order - the first live replica is chosen, in the specified order.
+        # first_or_random - if the first replica has a higher number of errors, pick a random one from the replicas with the minimum number of errors.
+        load_balancing: random
+
+    # Profile that allows only read queries.
+    readonly:
+        readonly: 1
+
+# Users and ACL.
+users:
+    # If the user name is not specified, the 'default' user is used.
+    default:
+        # Password could be specified in plaintext or in SHA256 (in hex format).
+        #
+        # If you want to specify the password in plaintext (not recommended), place it in the 'password' element.
+        # Example: password: qwerty
+        # Password could be empty.
+        #
+        # If you want to specify SHA256, place it in the 'password_sha256_hex' element.
+        # Example: password_sha256_hex: 65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5
+        # Restrictions of SHA256: impossibility to connect to ClickHouse using the MySQL JS client (as of July 2019).
+        #
+        # If you want to specify double SHA1, place it in the 'password_double_sha1_hex' element.
+        # Example: password_double_sha1_hex: e395796d6546b1b65db9d665cd43f0e858dd4303
+        #
+        # If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
+        # place its name in the 'server' element inside the 'ldap' element.
+        # Example:
+        #     ldap:
+        #         server: my_ldap_server
+        #
+        # If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
+        # place the 'kerberos' element instead of the 'password' (and similar) elements.
+        # The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
+        # You can also place the 'realm' element inside the 'kerberos' element to further restrict authentication to only those requests
+        # whose initiator's realm matches it.
+        # Example: kerberos: ''
+        # Example:
+        #     kerberos:
+        #         realm: EXAMPLE.COM
+        #
+        # How to generate a decent password:
+        # Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
+        # The first line will be the password, and the second the corresponding SHA256.
+        #
+        # How to generate double SHA1:
+        # Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
+        # The first line will be the password, and the second the corresponding double SHA1.
+
+        password: ''
+
+        # List of networks with open access.
+        #
+        # To open access from everywhere, specify:
+        #     - ip: '::/0'
+        #
+        # To open access only from localhost, specify:
+        #     - ip: '::1'
+        #     - ip: 127.0.0.1
+        #
+        # Each element of the list has one of the following forms:
+        # ip: IP address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
+        #     2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
+        # host: Hostname. Example: server01.yandex.ru.
+        #     To check access, a DNS query is performed, and all received addresses are compared to the peer address.
+        # host_regexp: Regular expression for host names. Example: ^server\d\d-\d\d-\d\.yandex\.ru$
+        #     To check access, a DNS PTR query is performed for the peer address, and then the regexp is applied.
+        #     Then, for the result of the PTR query, another DNS query is performed and all received addresses are compared to the peer address.
+        #     It is strongly recommended that the regexp ends with $ and that the whole expression is quoted in ''.
+        # All results of DNS requests are cached till server restart.
+
+        networks:
+            ip: '::/0'
+
+        # Settings profile for the user.
+        profile: default
+
+        # Quota for the user.
+        quota: default
+
+        # User can create other users and grant rights to them.
+        # access_management: 1
+
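+    # For illustration only, a sketch that is not part of the shipped example: a
+    # second, read-only user reusing the 'readonly' profile defined above might
+    # look like this:
+    # readonly_user:
+    #     password_sha256_hex: 65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5
+    #     networks:
+    #         ip: '::1'
+    #     profile: readonly
+    #     quota: default
+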
+# Quotas.
+quotas:
+    # Name of quota.
+    default:
+        # Limits for a time interval. You can specify many intervals with different limits.
+        interval:
+            # Length of the interval.
+            duration: 3600
+
+            # No limits. Just calculate resource usage for the time interval.
+            queries: 0
+            errors: 0
+            result_rows: 0
+            read_rows: 0
+            execution_time: 0
diff --git a/release b/release
index f2052840cb0..9484d79630a 100755
--- a/release
+++ b/release
@@ -2,23 +2,6 @@
 # If you have "no space left" error, you can change the location of temporary files with BUILDPLACE environment variable.
 
-# Advanced usage:
-# Test gcc-9:
-# env DIST=disco EXTRAPACKAGES="gcc-9 g++-9" DEB_CC=gcc-9 DEB_CXX=g++-9 CMAKE_FLAGS=" -DNO_WERROR=1 " ./release
-# Test gcc-8:
-# env DIST=bionic EXTRAPACKAGES="gcc-8 g++-8" DEB_CC=gcc-8 DEB_CXX=g++-8 CMAKE_FLAGS=" -DNO_WERROR=1 " ./release
-# Clang6 build:
-# env DIST=bionic EXTRAPACKAGES="clang-6.0 libstdc++-8-dev lld-6.0 liblld-6.0-dev libclang-6.0-dev liblld-6.0" DEB_CC=clang-6.0 DEB_CXX=clang++-6.0 CMAKE_FLAGS=" -DNO_WERROR=1 " ./release
-# Clang7 build:
-# env DIST=unstable EXTRAPACKAGES="clang-7 libstdc++-8-dev lld-7 liblld-7-dev libclang-7-dev liblld-7" DEB_CC=clang-7 DEB_CXX=clang++-7 CMAKE_FLAGS=" -DNO_WERROR=1 " ./release
-# Clang6 without internal compiler (for low memory arm64):
-# env DIST=bionic DISABLE_PARALLEL=1 EXTRAPACKAGES="clang-6.0 libstdc++-8-dev" DEB_CC=clang-6.0 DEB_CXX=clang++-6.0 CMAKE_FLAGS=" -DNO_WERROR=1 " ./release
-# Do not compile internal compiler but use from system:
-# env CMAKE_FLAGS="-DUSE_INTERNAL_LLVM_LIBRARY=0 -DENABLE_EMBEDDED_COMPILER=0 -DINTERNAL_COMPILER_EXECUTABLE=clang-6.0 -DINTERNAL_LINKER_EXECUTABLE=ld.lld-6.0 -DINTERNAL_COMPILER_BIN_ROOT=/usr/bin/" EXTRAPACKAGES="clang-6.0 lld-6.0 libstdc++-8-dev" DEB_CXX=clang++-6.0 DEB_CC=clang-6.0 TEST_RUN=1 TEST_OPT="compile" ./release
-
-# Build with ASan:
-# env SANITIZER=address ./release
-
 # Version increment:
 #   Default release: 18.1.2 -> 18.2.0:
 #     ./release --version
@@ -31,16 +14,16 @@
 set -e
 
+# Avoid dependency on locale
+LC_ALL=C
+
 CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 cd $CUR_DIR
 
 source "./utils/release/release_lib.sh"
 
-PBUILDER_AUTOUPDATE=${PBUILDER_AUTOUPDATE=4320}
-
 DEBUILD_NOSIGN_OPTIONS="-us -uc"
 DEBUILD_NODEPS_OPTIONS="-d"
-USE_PBUILDER=${USE_PBUILDER=1}
 
 if [ -z "$VERSION_STRING" ] ; then
     get_revision_author
@@ -58,16 +41,6 @@ do
     elif [[ $1 == '--version' ]]; then
         gen_revision_author $2
         exit 0
-    elif [[ $1 == '--pbuilder' ]]; then
-        # Default
-        shift
-    elif [[ $1 == '--no-pbuilder' ]]; then
-        USE_PBUILDER=
-        shift
-    elif [[ $1 == '--fast' ]]; then
-        # Wrong but fast pbuilder mode: create base package with all depends
-        EXTRAPACKAGES="$EXTRAPACKAGES debhelper cmake ninja-build gcc-8 g++-8 libc6-dev libicu-dev libreadline-dev psmisc bash expect python3 python3-lxml python3-termcolor python3-requests curl perl sudo openssl netcat-openbsd"
-        shift
     elif [[ $1 == '--rpm' ]]; then
         MAKE_RPM=1
         shift
@@ -117,43 +90,10 @@ echo -e "\nCurrent version is $VERSION_STRING"
 
 if [ -z "$NO_BUILD" ] ; then
     gen_changelog "$VERSION_STRING" "" "$AUTHOR" ""
-    if [ -z "$USE_PBUILDER" ] ; then
-        DEB_CC=${DEB_CC:=`which gcc-10 gcc-9 gcc | head -n1`}
-        DEB_CXX=${DEB_CXX:=`which gcc-10 g++-9 g++ | head -n1`}
-        # Build (only binary packages).
- debuild --preserve-env -e PATH \ - -e DEB_CC=$DEB_CC -e DEB_CXX=$DEB_CXX -e CMAKE_FLAGS="$CMAKE_FLAGS" \ - -b ${DEBUILD_NOSIGN_OPTIONS} ${DEBUILD_NODEPS_OPTIONS} - else - export DIST=${DIST:=bionic} - export SET_BUILDRESULT=${SET_BUILDRESULT:=$CUR_DIR/..} - - if [[ -z `which pbuilder` ]] ; then - sudo apt install -y pbuilder devscripts ccache fakeroot debhelper debian-archive-keyring debian-keyring lsb-release - fi - - . $CUR_DIR/debian/.pbuilderrc - - if [[ ! -e "/usr/share/debootstrap/scripts/${DIST}" ]] ; then - sudo ln -s gutsy /usr/share/debootstrap/scripts/${DIST} - fi - - if [[ -n "$FORCE_PBUILDER_CREATE" || ! -e "$BASETGZ" ]] ; then - echo Creating base system $BASETGZ - [ ! -e "/usr/share/debootstrap/scripts/${DIST}" ] && sudo ln -s gutsy /usr/share/debootstrap/scripts/${DIST} - sudo --preserve-env bash -x pbuilder create --configfile $CUR_DIR/debian/.pbuilderrc $PBUILDER_OPT - fi - - if [ "$PBUILDER_AUTOUPDATE" -gt 0 ]; then - # Update every 3 days (60*24*3 minutes) - if [[ -n "$PBUILDER_UPDATE" ]] || test `find "$BASETGZ" -mmin +$PBUILDER_AUTOUPDATE` ; then - echo Updating base system $BASETGZ - sudo --preserve-env pbuilder update --configfile $CUR_DIR/debian/.pbuilderrc $PBUILDER_OPT - fi - fi - - pdebuild --configfile $CUR_DIR/debian/.pbuilderrc -- $PBUILDER_OPT - fi + # Build (only binary packages). + debuild --preserve-env -e PATH \ + -e DEB_CC=$DEB_CC -e DEB_CXX=$DEB_CXX -e CMAKE_FLAGS="$CMAKE_FLAGS" \ + -b ${DEBUILD_NOSIGN_OPTIONS} ${DEBUILD_NODEPS_OPTIONS} fi if [ -n "$MAKE_RPM" ]; then diff --git a/src/Access/AccessType.h b/src/Access/AccessType.h index 40740b3164e..22d99112cb7 100644 --- a/src/Access/AccessType.h +++ b/src/Access/AccessType.h @@ -53,6 +53,12 @@ enum class AccessType M(ALTER_CLEAR_INDEX, "CLEAR INDEX", TABLE, ALTER_INDEX) \ M(ALTER_INDEX, "INDEX", GROUP, ALTER_TABLE) /* allows to execute ALTER ORDER BY or ALTER {ADD|DROP...} INDEX */\ \ + M(ALTER_ADD_PROJECTION, "ADD PROJECTION", TABLE, ALTER_PROJECTION) \ + M(ALTER_DROP_PROJECTION, "DROP PROJECTION", TABLE, ALTER_PROJECTION) \ + M(ALTER_MATERIALIZE_PROJECTION, "MATERIALIZE PROJECTION", TABLE, ALTER_PROJECTION) \ + M(ALTER_CLEAR_PROJECTION, "CLEAR PROJECTION", TABLE, ALTER_PROJECTION) \ + M(ALTER_PROJECTION, "PROJECTION", GROUP, ALTER_TABLE) /* allows to execute ALTER ORDER BY or ALTER {ADD|DROP...} PROJECTION */\ + \ M(ALTER_ADD_CONSTRAINT, "ADD CONSTRAINT", TABLE, ALTER_CONSTRAINT) \ M(ALTER_DROP_CONSTRAINT, "DROP CONSTRAINT", TABLE, ALTER_CONSTRAINT) \ M(ALTER_CONSTRAINT, "CONSTRAINT", GROUP, ALTER_TABLE) /* allows to execute ALTER {ADD|DROP} CONSTRAINT */\ @@ -62,7 +68,7 @@ enum class AccessType enabled implicitly by the grant ALTER_TABLE */\ M(ALTER_SETTINGS, "ALTER SETTING, ALTER MODIFY SETTING, MODIFY SETTING", TABLE, ALTER_TABLE) /* allows to execute ALTER MODIFY SETTING */\ M(ALTER_MOVE_PARTITION, "ALTER MOVE PART, MOVE PARTITION, MOVE PART", TABLE, ALTER_TABLE) \ - M(ALTER_FETCH_PARTITION, "FETCH PARTITION", TABLE, ALTER_TABLE) \ + M(ALTER_FETCH_PARTITION, "ALTER FETCH PART, FETCH PARTITION", TABLE, ALTER_TABLE) \ M(ALTER_FREEZE_PARTITION, "FREEZE PARTITION, UNFREEZE", TABLE, ALTER_TABLE) \ \ M(ALTER_TABLE, "", GROUP, ALTER) \ @@ -130,8 +136,10 @@ enum class AccessType M(SYSTEM_RELOAD_CONFIG, "RELOAD CONFIG", GLOBAL, SYSTEM_RELOAD) \ M(SYSTEM_RELOAD_SYMBOLS, "RELOAD SYMBOLS", GLOBAL, SYSTEM_RELOAD) \ M(SYSTEM_RELOAD_DICTIONARY, "SYSTEM RELOAD DICTIONARIES, RELOAD DICTIONARY, RELOAD DICTIONARIES", GLOBAL, SYSTEM_RELOAD) \ + M(SYSTEM_RELOAD_MODEL, "SYSTEM RELOAD MODELS, RELOAD MODEL, RELOAD 
MODELS", GLOBAL, SYSTEM_RELOAD) \ M(SYSTEM_RELOAD_EMBEDDED_DICTIONARIES, "RELOAD EMBEDDED DICTIONARIES", GLOBAL, SYSTEM_RELOAD) /* implicitly enabled by the grant SYSTEM_RELOAD_DICTIONARY ON *.* */\ M(SYSTEM_RELOAD, "", GROUP, SYSTEM) \ + M(SYSTEM_RESTART_DISK, "SYSTEM RESTART DISK", GLOBAL, SYSTEM) \ M(SYSTEM_MERGES, "SYSTEM STOP MERGES, SYSTEM START MERGES, STOP_MERGES, START MERGES", TABLE, SYSTEM) \ M(SYSTEM_TTL_MERGES, "SYSTEM STOP TTL MERGES, SYSTEM START TTL MERGES, STOP TTL MERGES, START TTL MERGES", TABLE, SYSTEM) \ M(SYSTEM_FETCHES, "SYSTEM STOP FETCHES, SYSTEM START FETCHES, STOP FETCHES, START FETCHES", TABLE, SYSTEM) \ diff --git a/src/Access/AllowedClientHosts.h b/src/Access/AllowedClientHosts.h index a6895b120e0..7b21fd7e236 100644 --- a/src/Access/AllowedClientHosts.h +++ b/src/Access/AllowedClientHosts.h @@ -7,7 +7,9 @@ #include #include #include +#include +namespace fs = std::filesystem; namespace DB { @@ -198,9 +200,9 @@ inline String AllowedClientHosts::IPSubnet::toString() const if (isMaskAllBitsOne()) return prefix.toString(); else if (IPAddress{prefix_length, mask.family()} == mask) - return prefix.toString() + "/" + std::to_string(prefix_length); + return fs::path(prefix.toString()) / std::to_string(prefix_length); else - return prefix.toString() + "/" + mask.toString(); + return fs::path(prefix.toString()) / mask.toString(); } inline bool AllowedClientHosts::IPSubnet::isMaskAllBitsOne() const diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index 0bcaef1e441..90495a83dfc 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -143,11 +143,13 @@ ContextAccess::ContextAccess(const AccessControlManager & manager_, const Params : manager(&manager_) , params(params_) { + std::lock_guard lock{mutex}; + subscription_for_user_change = manager->subscribeForChanges( *params.user_id, [this](const UUID &, const AccessEntityPtr & entity) { UserPtr changed_user = entity ? 
typeid_cast(entity) : nullptr; - std::lock_guard lock{mutex}; + std::lock_guard lock2{mutex}; setUser(changed_user); }); @@ -189,7 +191,7 @@ void ContextAccess::setUser(const UserPtr & user_) const current_roles_with_admin_option = user->granted_roles.findGrantedWithAdminOption(params.current_roles); } - subscription_for_roles_changes = {}; + subscription_for_roles_changes.reset(); enabled_roles = manager->getEnabledRoles(current_roles, current_roles_with_admin_option); subscription_for_roles_changes = enabled_roles->subscribeForChanges([this](const std::shared_ptr & roles_info_) { diff --git a/src/Access/DiskAccessStorage.cpp b/src/Access/DiskAccessStorage.cpp index 80594f66dfc..8c38cd02f9c 100644 --- a/src/Access/DiskAccessStorage.cpp +++ b/src/Access/DiskAccessStorage.cpp @@ -355,8 +355,9 @@ String DiskAccessStorage::getStorageParamsJSON() const std::lock_guard lock{mutex}; Poco::JSON::Object json; json.set("path", directory_path); - if (readonly) - json.set("readonly", readonly.load()); + bool readonly_loaded = readonly; + if (readonly_loaded) + json.set("readonly", Poco::Dynamic::Var{true}); std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM oss.exceptions(std::ios::failbit); Poco::JSON::Stringifier::stringify(json, oss); diff --git a/src/Access/ExternalAuthenticators.cpp b/src/Access/ExternalAuthenticators.cpp index 1cade973724..d4100c4e520 100644 --- a/src/Access/ExternalAuthenticators.cpp +++ b/src/Access/ExternalAuthenticators.cpp @@ -20,13 +20,42 @@ namespace ErrorCodes namespace { -auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const String & name) +void parseLDAPSearchParams(LDAPClient::SearchParams & params, const Poco::Util::AbstractConfiguration & config, const String & prefix) +{ + const bool has_base_dn = config.has(prefix + ".base_dn"); + const bool has_search_filter = config.has(prefix + ".search_filter"); + const bool has_attribute = config.has(prefix + ".attribute"); + const bool has_scope = config.has(prefix + ".scope"); + + if (has_base_dn) + params.base_dn = config.getString(prefix + ".base_dn"); + + if (has_search_filter) + params.search_filter = config.getString(prefix + ".search_filter"); + + if (has_attribute) + params.attribute = config.getString(prefix + ".attribute"); + + if (has_scope) + { + auto scope = config.getString(prefix + ".scope"); + boost::algorithm::to_lower(scope); + + if (scope == "base") params.scope = LDAPClient::SearchParams::Scope::BASE; + else if (scope == "one_level") params.scope = LDAPClient::SearchParams::Scope::ONE_LEVEL; + else if (scope == "subtree") params.scope = LDAPClient::SearchParams::Scope::SUBTREE; + else if (scope == "children") params.scope = LDAPClient::SearchParams::Scope::CHILDREN; + else + throw Exception("Invalid value for 'scope' field of LDAP search parameters in '" + prefix + + "' section, must be one of 'base', 'one_level', 'subtree', or 'children'", ErrorCodes::BAD_ARGUMENTS); + } +} + +void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConfiguration & config, const String & name) { if (name.empty()) throw Exception("LDAP server name cannot be empty", ErrorCodes::BAD_ARGUMENTS); - LDAPClient::Params params; - const String ldap_server_config = "ldap_servers." 
+ name; const bool has_host = config.has(ldap_server_config + ".host"); @@ -34,6 +63,7 @@ auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const Str const bool has_bind_dn = config.has(ldap_server_config + ".bind_dn"); const bool has_auth_dn_prefix = config.has(ldap_server_config + ".auth_dn_prefix"); const bool has_auth_dn_suffix = config.has(ldap_server_config + ".auth_dn_suffix"); + const bool has_user_dn_detection = config.has(ldap_server_config + ".user_dn_detection"); const bool has_verification_cooldown = config.has(ldap_server_config + ".verification_cooldown"); const bool has_enable_tls = config.has(ldap_server_config + ".enable_tls"); const bool has_tls_minimum_protocol_version = config.has(ldap_server_config + ".tls_minimum_protocol_version"); @@ -66,6 +96,17 @@ auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const Str params.bind_dn = auth_dn_prefix + "{user_name}" + auth_dn_suffix; } + if (has_user_dn_detection) + { + if (!params.user_dn_detection) + { + params.user_dn_detection.emplace(); + params.user_dn_detection->attribute = "dn"; + } + + parseLDAPSearchParams(*params.user_dn_detection, config, ldap_server_config + ".user_dn_detection"); + } + if (has_verification_cooldown) params.verification_cooldown = std::chrono::seconds{config.getUInt64(ldap_server_config + ".verification_cooldown")}; @@ -77,7 +118,7 @@ auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const Str if (enable_tls_lc_str == "starttls") params.enable_tls = LDAPClient::Params::TLSEnable::YES_STARTTLS; else if (config.getBool(ldap_server_config + ".enable_tls")) - params.enable_tls = LDAPClient::Params::TLSEnable::YES; + params.enable_tls = LDAPClient::Params::TLSEnable::YES; //-V1048 else params.enable_tls = LDAPClient::Params::TLSEnable::NO; } @@ -96,7 +137,7 @@ auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const Str else if (tls_minimum_protocol_version_lc_str == "tls1.1") params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_1; else if (tls_minimum_protocol_version_lc_str == "tls1.2") - params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_2; + params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_2; //-V1048 else throw Exception("Bad value for 'tls_minimum_protocol_version' entry, allowed values are: 'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2'", ErrorCodes::BAD_ARGUMENTS); } @@ -113,7 +154,7 @@ auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const Str else if (tls_require_cert_lc_str == "try") params.tls_require_cert = LDAPClient::Params::TLSRequireCert::TRY; else if (tls_require_cert_lc_str == "demand") - params.tls_require_cert = LDAPClient::Params::TLSRequireCert::DEMAND; + params.tls_require_cert = LDAPClient::Params::TLSRequireCert::DEMAND; //-V1048 else throw Exception("Bad value for 'tls_require_cert' entry, allowed values are: 'never', 'allow', 'try', 'demand'", ErrorCodes::BAD_ARGUMENTS); } @@ -143,14 +184,10 @@ auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const Str } else params.port = (params.enable_tls == LDAPClient::Params::TLSEnable::YES ? 
636 : 389); - - return params; } -auto parseKerberosParams(const Poco::Util::AbstractConfiguration & config) +void parseKerberosParams(GSSAcceptorContext::Params & params, const Poco::Util::AbstractConfiguration & config) { - GSSAcceptorContext::Params params; - Poco::Util::AbstractConfiguration::Keys keys; config.keys("kerberos", keys); @@ -180,12 +217,20 @@ auto parseKerberosParams(const Poco::Util::AbstractConfiguration & config) params.realm = config.getString("kerberos.realm", ""); params.principal = config.getString("kerberos.principal", ""); - - return params; } } +void parseLDAPRoleSearchParams(LDAPClient::RoleSearchParams & params, const Poco::Util::AbstractConfiguration & config, const String & prefix) +{ + parseLDAPSearchParams(params, config, prefix); + + const bool has_prefix = config.has(prefix + ".prefix"); + + if (has_prefix) + params.prefix = config.getString(prefix + ".prefix"); +} + void ExternalAuthenticators::reset() { std::scoped_lock lock(mutex); @@ -229,7 +274,8 @@ void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfigur { try { - ldap_client_params_blueprint.insert_or_assign(ldap_server_name, parseLDAPServer(config, ldap_server_name)); + ldap_client_params_blueprint.erase(ldap_server_name); + parseLDAPServer(ldap_client_params_blueprint.emplace(ldap_server_name, LDAPClient::Params{}).first->second, config, ldap_server_name); } catch (...) { @@ -240,7 +286,7 @@ void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfigur try { if (kerberos_keys_count > 0) - kerberos_params = parseKerberosParams(config); + parseKerberosParams(kerberos_params.emplace(), config); } catch (...) { @@ -249,7 +295,7 @@ void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfigur } bool ExternalAuthenticators::checkLDAPCredentials(const String & server, const BasicCredentials & credentials, - const LDAPClient::SearchParamsList * search_params, LDAPClient::SearchResultsList * search_results) const + const LDAPClient::RoleSearchParamsList * role_search_params, LDAPClient::SearchResultsList * role_search_results) const { std::optional params; std::size_t params_hash = 0; @@ -267,9 +313,9 @@ bool ExternalAuthenticators::checkLDAPCredentials(const String & server, const B params->password = credentials.getPassword(); params->combineCoreHash(params_hash); - if (search_params) + if (role_search_params) { - for (const auto & params_instance : *search_params) + for (const auto & params_instance : *role_search_params) { params_instance.combineHash(params_hash); } @@ -301,14 +347,14 @@ bool ExternalAuthenticators::checkLDAPCredentials(const String & server, const B // Ensure that search_params are compatible. ( - search_params == nullptr ? - entry.last_successful_search_results.empty() : - search_params->size() == entry.last_successful_search_results.size() + role_search_params == nullptr ? 
+ entry.last_successful_role_search_results.empty() : + role_search_params->size() == entry.last_successful_role_search_results.size() ) ) { - if (search_results) - *search_results = entry.last_successful_search_results; + if (role_search_results) + *role_search_results = entry.last_successful_role_search_results; return true; } @@ -326,7 +372,7 @@ bool ExternalAuthenticators::checkLDAPCredentials(const String & server, const B } LDAPSimpleAuthClient client(params.value()); - const auto result = client.authenticate(search_params, search_results); + const auto result = client.authenticate(role_search_params, role_search_results); const auto current_check_timestamp = std::chrono::steady_clock::now(); // Update the cache, but only if this is the latest check and the server is still configured in a compatible way. @@ -345,9 +391,9 @@ bool ExternalAuthenticators::checkLDAPCredentials(const String & server, const B std::size_t new_params_hash = 0; new_params.combineCoreHash(new_params_hash); - if (search_params) + if (role_search_params) { - for (const auto & params_instance : *search_params) + for (const auto & params_instance : *role_search_params) { params_instance.combineHash(new_params_hash); } @@ -363,17 +409,17 @@ bool ExternalAuthenticators::checkLDAPCredentials(const String & server, const B entry.last_successful_params_hash = params_hash; entry.last_successful_authentication_timestamp = current_check_timestamp; - if (search_results) - entry.last_successful_search_results = *search_results; + if (role_search_results) + entry.last_successful_role_search_results = *role_search_results; else - entry.last_successful_search_results.clear(); + entry.last_successful_role_search_results.clear(); } else if ( entry.last_successful_params_hash != params_hash || ( - search_params == nullptr ? - !entry.last_successful_search_results.empty() : - search_params->size() != entry.last_successful_search_results.size() + role_search_params == nullptr ? + !entry.last_successful_role_search_results.empty() : + role_search_params->size() != entry.last_successful_role_search_results.size() ) ) { diff --git a/src/Access/ExternalAuthenticators.h b/src/Access/ExternalAuthenticators.h index c8feea7eada..24f1f7b6528 100644 --- a/src/Access/ExternalAuthenticators.h +++ b/src/Access/ExternalAuthenticators.h @@ -34,7 +34,7 @@ public: // The name and readiness of the credentials must be verified before calling these. 
bool checkLDAPCredentials(const String & server, const BasicCredentials & credentials, - const LDAPClient::SearchParamsList * search_params = nullptr, LDAPClient::SearchResultsList * search_results = nullptr) const; + const LDAPClient::RoleSearchParamsList * role_search_params = nullptr, LDAPClient::SearchResultsList * role_search_results = nullptr) const; bool checkKerberosCredentials(const String & realm, const GSSAcceptorContext & credentials) const; GSSAcceptorContext::Params getKerberosParams() const; @@ -44,7 +44,7 @@ private: { std::size_t last_successful_params_hash = 0; std::chrono::steady_clock::time_point last_successful_authentication_timestamp; - LDAPClient::SearchResultsList last_successful_search_results; + LDAPClient::SearchResultsList last_successful_role_search_results; }; using LDAPCache = std::unordered_map; // user name -> cache entry @@ -58,4 +58,6 @@ private: std::optional kerberos_params; }; +void parseLDAPRoleSearchParams(LDAPClient::RoleSearchParams & params, const Poco::Util::AbstractConfiguration & config, const String & prefix); + } diff --git a/src/Access/GrantedRoles.cpp b/src/Access/GrantedRoles.cpp index 7930b56e44d..2659f8a3ec9 100644 --- a/src/Access/GrantedRoles.cpp +++ b/src/Access/GrantedRoles.cpp @@ -136,7 +136,7 @@ GrantedRoles::Elements GrantedRoles::getElements() const boost::range::set_difference(roles, roles_with_admin_option, std::back_inserter(element.ids)); if (!element.empty()) { - element.admin_option = false; + element.admin_option = false; //-V1048 elements.emplace_back(std::move(element)); } diff --git a/src/Access/IAccessStorage.h b/src/Access/IAccessStorage.h index 2cdd8eabf73..cc914664149 100644 --- a/src/Access/IAccessStorage.h +++ b/src/Access/IAccessStorage.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include #include #include diff --git a/src/Access/LDAPAccessStorage.cpp b/src/Access/LDAPAccessStorage.cpp index b47a9b3e041..c1d54e8c9aa 100644 --- a/src/Access/LDAPAccessStorage.cpp +++ b/src/Access/LDAPAccessStorage.cpp @@ -68,34 +68,15 @@ void LDAPAccessStorage::setConfiguration(AccessControlManager * access_control_m common_roles_cfg.insert(role_names.begin(), role_names.end()); } - LDAPClient::SearchParamsList role_search_params_cfg; + LDAPClient::RoleSearchParamsList role_search_params_cfg; if (has_role_mapping) { Poco::Util::AbstractConfiguration::Keys all_keys; config.keys(prefix, all_keys); for (const auto & key : all_keys) { - if (key != "role_mapping" && key.find("role_mapping[") != 0) - continue; - - const String rm_prefix = prefix_str + key; - const String rm_prefix_str = rm_prefix + '.'; - role_search_params_cfg.emplace_back(); - auto & rm_params = role_search_params_cfg.back(); - - rm_params.base_dn = config.getString(rm_prefix_str + "base_dn", ""); - rm_params.search_filter = config.getString(rm_prefix_str + "search_filter", ""); - rm_params.attribute = config.getString(rm_prefix_str + "attribute", "cn"); - rm_params.prefix = config.getString(rm_prefix_str + "prefix", ""); - - auto scope = config.getString(rm_prefix_str + "scope", "subtree"); - boost::algorithm::to_lower(scope); - if (scope == "base") rm_params.scope = LDAPClient::SearchParams::Scope::BASE; - else if (scope == "one_level") rm_params.scope = LDAPClient::SearchParams::Scope::ONE_LEVEL; - else if (scope == "subtree") rm_params.scope = LDAPClient::SearchParams::Scope::SUBTREE; - else if (scope == "children") rm_params.scope = LDAPClient::SearchParams::Scope::CHILDREN; - else - throw Exception("Invalid value of 'scope' field in '" + key + "' 
section of LDAP user directory, must be one of 'base', 'one_level', 'subtree', or 'children'", ErrorCodes::BAD_ARGUMENTS); + if (key == "role_mapping" || key.find("role_mapping[") == 0) + parseLDAPRoleSearchParams(role_search_params_cfg.emplace_back(), config, prefix_str + key); } } @@ -364,7 +345,7 @@ std::set LDAPAccessStorage::mapExternalRolesNoLock(const LDAPClient::Sea bool LDAPAccessStorage::areLDAPCredentialsValidNoLock(const User & user, const Credentials & credentials, - const ExternalAuthenticators & external_authenticators, LDAPClient::SearchResultsList & search_results) const + const ExternalAuthenticators & external_authenticators, LDAPClient::SearchResultsList & role_search_results) const { if (!credentials.isReady()) return false; @@ -373,7 +354,7 @@ bool LDAPAccessStorage::areLDAPCredentialsValidNoLock(const User & user, const C return false; if (const auto * basic_credentials = dynamic_cast(&credentials)) - return external_authenticators.checkLDAPCredentials(ldap_server_name, *basic_credentials, &role_search_params, &search_results); + return external_authenticators.checkLDAPCredentials(ldap_server_name, *basic_credentials, &role_search_params, &role_search_results); return false; } diff --git a/src/Access/LDAPAccessStorage.h b/src/Access/LDAPAccessStorage.h index ea0ab47c225..33ac9f0a914 100644 --- a/src/Access/LDAPAccessStorage.h +++ b/src/Access/LDAPAccessStorage.h @@ -68,12 +68,12 @@ private: void updateAssignedRolesNoLock(const UUID & id, const String & user_name, const LDAPClient::SearchResultsList & external_roles) const; std::set mapExternalRolesNoLock(const LDAPClient::SearchResultsList & external_roles) const; bool areLDAPCredentialsValidNoLock(const User & user, const Credentials & credentials, - const ExternalAuthenticators & external_authenticators, LDAPClient::SearchResultsList & search_results) const; + const ExternalAuthenticators & external_authenticators, LDAPClient::SearchResultsList & role_search_results) const; mutable std::recursive_mutex mutex; AccessControlManager * access_control_manager = nullptr; String ldap_server_name; - LDAPClient::SearchParamsList role_search_params; + LDAPClient::RoleSearchParamsList role_search_params; std::set common_role_names; // role name that should be granted to all users at all times mutable std::map external_role_hashes; // user name -> LDAPClient::SearchResultsList hash (most recently retrieved and processed) mutable std::map> users_per_roles; // role name -> user names (...it should be granted to; may but don't have to exist for common roles) diff --git a/src/Access/LDAPClient.cpp b/src/Access/LDAPClient.cpp index 5c4b7dd8d99..a8f9675774b 100644 --- a/src/Access/LDAPClient.cpp +++ b/src/Access/LDAPClient.cpp @@ -32,6 +32,11 @@ void LDAPClient::SearchParams::combineHash(std::size_t & seed) const boost::hash_combine(seed, static_cast(scope)); boost::hash_combine(seed, search_filter); boost::hash_combine(seed, attribute); +} + +void LDAPClient::RoleSearchParams::combineHash(std::size_t & seed) const +{ + SearchParams::combineHash(seed); boost::hash_combine(seed, prefix); } @@ -42,6 +47,9 @@ void LDAPClient::Params::combineCoreHash(std::size_t & seed) const boost::hash_combine(seed, bind_dn); boost::hash_combine(seed, user); boost::hash_combine(seed, password); + + if (user_dn_detection) + user_dn_detection->combineHash(seed); } LDAPClient::LDAPClient(const Params & params_) @@ -286,18 +294,33 @@ void LDAPClient::openConnection() if (params.enable_tls == LDAPClient::Params::TLSEnable::YES_STARTTLS) 
diag(ldap_start_tls_s(handle, nullptr, nullptr)); + final_user_name = escapeForLDAP(params.user); + final_bind_dn = replacePlaceholders(params.bind_dn, { {"{user_name}", final_user_name} }); + final_user_dn = final_bind_dn; // The default value... may be updated right after a successful bind. + switch (params.sasl_mechanism) { case LDAPClient::Params::SASLMechanism::SIMPLE: { - const auto escaped_user_name = escapeForLDAP(params.user); - const auto bind_dn = replacePlaceholders(params.bind_dn, { {"{user_name}", escaped_user_name} }); - ::berval cred; cred.bv_val = const_cast(params.password.c_str()); cred.bv_len = params.password.size(); - diag(ldap_sasl_bind_s(handle, bind_dn.c_str(), LDAP_SASL_SIMPLE, &cred, nullptr, nullptr, nullptr)); + diag(ldap_sasl_bind_s(handle, final_bind_dn.c_str(), LDAP_SASL_SIMPLE, &cred, nullptr, nullptr, nullptr)); + + // Once bound, run the user DN search query and update the default value, if asked. + if (params.user_dn_detection) + { + const auto user_dn_search_results = search(*params.user_dn_detection); + + if (user_dn_search_results.empty()) + throw Exception("Failed to detect user DN: empty search results", ErrorCodes::LDAP_ERROR); + + if (user_dn_search_results.size() > 1) + throw Exception("Failed to detect user DN: more than one entry in the search results", ErrorCodes::LDAP_ERROR); + + final_user_dn = *user_dn_search_results.begin(); + } break; } @@ -316,6 +339,9 @@ void LDAPClient::closeConnection() noexcept ldap_unbind_ext_s(handle, nullptr, nullptr); handle = nullptr; + final_user_name.clear(); + final_bind_dn.clear(); + final_user_dn.clear(); } LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params) @@ -333,10 +359,19 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params) case SearchParams::Scope::CHILDREN: scope = LDAP_SCOPE_CHILDREN; break; } - const auto escaped_user_name = escapeForLDAP(params.user); - const auto bind_dn = replacePlaceholders(params.bind_dn, { {"{user_name}", escaped_user_name} }); - const auto base_dn = replacePlaceholders(search_params.base_dn, { {"{user_name}", escaped_user_name}, {"{bind_dn}", bind_dn} }); - const auto search_filter = replacePlaceholders(search_params.search_filter, { {"{user_name}", escaped_user_name}, {"{bind_dn}", bind_dn}, {"{base_dn}", base_dn} }); + const auto final_base_dn = replacePlaceholders(search_params.base_dn, { + {"{user_name}", final_user_name}, + {"{bind_dn}", final_bind_dn}, + {"{user_dn}", final_user_dn} + }); + + const auto final_search_filter = replacePlaceholders(search_params.search_filter, { + {"{user_name}", final_user_name}, + {"{bind_dn}", final_bind_dn}, + {"{user_dn}", final_user_dn}, + {"{base_dn}", final_base_dn} + }); + char * attrs[] = { const_cast(search_params.attribute.c_str()), nullptr }; ::timeval timeout = { params.search_timeout.count(), 0 }; LDAPMessage* msgs = nullptr; @@ -349,7 +384,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params) } }); - diag(ldap_search_ext_s(handle, base_dn.c_str(), scope, search_filter.c_str(), attrs, 0, nullptr, nullptr, &timeout, params.search_limit, &msgs)); + diag(ldap_search_ext_s(handle, final_base_dn.c_str(), scope, final_search_filter.c_str(), attrs, 0, nullptr, nullptr, &timeout, params.search_limit, &msgs)); for ( auto * msg = ldap_first_message(handle, msgs); @@ -361,6 +396,27 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params) { case LDAP_RES_SEARCH_ENTRY: { + // Extract DN separately, if the requested 
@@ -333,10 +359,19 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
         case SearchParams::Scope::CHILDREN:  scope = LDAP_SCOPE_CHILDREN;  break;
     }
 
-    const auto escaped_user_name = escapeForLDAP(params.user);
-    const auto bind_dn = replacePlaceholders(params.bind_dn, { {"{user_name}", escaped_user_name} });
-    const auto base_dn = replacePlaceholders(search_params.base_dn, { {"{user_name}", escaped_user_name}, {"{bind_dn}", bind_dn} });
-    const auto search_filter = replacePlaceholders(search_params.search_filter, { {"{user_name}", escaped_user_name}, {"{bind_dn}", bind_dn}, {"{base_dn}", base_dn} });
+    const auto final_base_dn = replacePlaceholders(search_params.base_dn, {
+        {"{user_name}", final_user_name},
+        {"{bind_dn}", final_bind_dn},
+        {"{user_dn}", final_user_dn}
+    });
+
+    const auto final_search_filter = replacePlaceholders(search_params.search_filter, {
+        {"{user_name}", final_user_name},
+        {"{bind_dn}", final_bind_dn},
+        {"{user_dn}", final_user_dn},
+        {"{base_dn}", final_base_dn}
+    });
+
     char * attrs[] = { const_cast<char *>(search_params.attribute.c_str()), nullptr };
     ::timeval timeout = { params.search_timeout.count(), 0 };
     LDAPMessage* msgs = nullptr;
@@ -349,7 +384,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
         }
     });
 
-    diag(ldap_search_ext_s(handle, base_dn.c_str(), scope, search_filter.c_str(), attrs, 0, nullptr, nullptr, &timeout, params.search_limit, &msgs));
+    diag(ldap_search_ext_s(handle, final_base_dn.c_str(), scope, final_search_filter.c_str(), attrs, 0, nullptr, nullptr, &timeout, params.search_limit, &msgs));
 
     for (
         auto * msg = ldap_first_message(handle, msgs);
@@ -361,6 +396,27 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
         {
             case LDAP_RES_SEARCH_ENTRY:
             {
+                // Extract DN separately, if the requested attribute is DN.
+                if (boost::iequals("dn", search_params.attribute))
+                {
+                    BerElement * ber = nullptr;
+
+                    SCOPE_EXIT({
+                        if (ber)
+                        {
+                            ber_free(ber, 0);
+                            ber = nullptr;
+                        }
+                    });
+
+                    ::berval bv;
+
+                    diag(ldap_get_dn_ber(handle, msg, &ber, &bv));
+
+                    if (bv.bv_val && bv.bv_len > 0)
+                        result.emplace(bv.bv_val, bv.bv_len);
+                }
+
                 BerElement * ber = nullptr;
 
                 SCOPE_EXIT({
@@ -471,12 +527,12 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
     return result;
 }
 
-bool LDAPSimpleAuthClient::authenticate(const SearchParamsList * search_params, SearchResultsList * search_results)
+bool LDAPSimpleAuthClient::authenticate(const RoleSearchParamsList * role_search_params, SearchResultsList * role_search_results)
 {
     if (params.user.empty())
         throw Exception("LDAP authentication of a user with empty name is not allowed", ErrorCodes::BAD_ARGUMENTS);
 
-    if (!search_params != !search_results)
+    if (!role_search_params != !role_search_results)
         throw Exception("Cannot return LDAP search results", ErrorCodes::BAD_ARGUMENTS);
 
     // Silently reject authentication attempt if the password is empty as if it didn't match.
@@ -489,21 +545,21 @@ bool LDAPSimpleAuthClient::authenticate(const SearchParamsList * search_params,
         openConnection();
 
         // While connected, run search queries and save the results, if asked.
-        if (search_params)
+        if (role_search_params)
         {
-            search_results->clear();
-            search_results->reserve(search_params->size());
+            role_search_results->clear();
+            role_search_results->reserve(role_search_params->size());
 
             try
             {
-                for (const auto & single_search_params : *search_params)
+                for (const auto & params_instance : *role_search_params)
                 {
-                    search_results->emplace_back(search(single_search_params));
+                    role_search_results->emplace_back(search(params_instance));
                 }
             }
             catch (...)
             {
-                search_results->clear();
+                role_search_results->clear();
                 throw;
             }
         }
@@ -532,7 +588,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams &)
     throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
 }
 
-bool LDAPSimpleAuthClient::authenticate(const SearchParamsList *, SearchResultsList *)
+bool LDAPSimpleAuthClient::authenticate(const RoleSearchParamsList *, SearchResultsList *)
 {
     throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
 }
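// Illustrative sketch (not from this diff): the substitution order used by the
// search() change above. {base_dn} is expanded before the filter so the filter
// can refer to the already-expanded base DN. replace_all/expand_filter are
// invented stand-ins for the internal replacePlaceholders helper.
#include <cstddef>
#include <string>
#include <utility>
#include <vector>

static std::string replace_all(std::string text, const std::vector<std::pair<std::string, std::string>> & subs)
{
    for (const auto & [key, value] : subs)
        for (std::size_t pos = text.find(key); pos != std::string::npos; pos = text.find(key, pos + value.size()))
            text.replace(pos, key.size(), value);
    return text;
}

std::string expand_filter(const std::string & filter_template, const std::string & base_dn_template,
    const std::string & user_name, const std::string & bind_dn, const std::string & user_dn)
{
    const auto base_dn = replace_all(base_dn_template,
        {{"{user_name}", user_name}, {"{bind_dn}", bind_dn}, {"{user_dn}", user_dn}});
    return replace_all(filter_template,
        {{"{user_name}", user_name}, {"{bind_dn}", bind_dn}, {"{user_dn}", user_dn}, {"{base_dn}", base_dn}});
}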
diff --git a/src/Access/LDAPClient.h b/src/Access/LDAPClient.h
index 4fc97bb957b..388e7ad0f0d 100644
--- a/src/Access/LDAPClient.h
+++ b/src/Access/LDAPClient.h
@@ -38,12 +38,20 @@ public:
         Scope scope = Scope::SUBTREE;
         String search_filter;
         String attribute = "cn";
+
+        void combineHash(std::size_t & seed) const;
+    };
+
+    struct RoleSearchParams
+        : public SearchParams
+    {
         String prefix;
 
         void combineHash(std::size_t & seed) const;
     };
 
-    using SearchParamsList = std::vector<SearchParams>;
+    using RoleSearchParamsList = std::vector<RoleSearchParams>;
+
     using SearchResults = std::set<String>;
     using SearchResultsList = std::vector<SearchResults>;
 
@@ -105,6 +113,8 @@ public:
         String user;
         String password;
 
+        std::optional<SearchParams> user_dn_detection;
+
         std::chrono::seconds verification_cooldown{0};
 
         std::chrono::seconds operation_timeout{40};
@@ -134,6 +144,9 @@ protected:
 #if USE_LDAP
     LDAP * handle = nullptr;
 #endif
+    String final_user_name;
+    String final_bind_dn;
+    String final_user_dn;
 };
 
 class LDAPSimpleAuthClient
@@ -141,7 +154,7 @@ class LDAPSimpleAuthClient
 {
 public:
     using LDAPClient::LDAPClient;
-    bool authenticate(const SearchParamsList * search_params, SearchResultsList * search_results);
+    bool authenticate(const RoleSearchParamsList * role_search_params, SearchResultsList * role_search_results);
 };
 
 }
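// Illustrative sketch (not from this diff): the point of the combineHash() chain
// declared above is to fold a whole (connection, user, role-mapping) configuration
// into one std::size_t seed, e.g. as a key for the verification_cooldown cache.
// computeCacheKey is an invented name; the member functions are the ones declared here.
#include <cstddef>
#include <Access/LDAPClient.h>

std::size_t computeCacheKey(const DB::LDAPClient::Params & params,
    const DB::LDAPClient::RoleSearchParamsList & role_search_params)
{
    std::size_t seed = 0;
    params.combineCoreHash(seed); // bind_dn, user, password, user_dn_detection (per the hunk above)
    for (const auto & role_params : role_search_params)
        role_params.combineHash(seed); // SearchParams fields plus the role prefix
    return seed;
}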
diff --git a/src/Access/MemoryAccessStorage.h b/src/Access/MemoryAccessStorage.h
index 92439342168..512ccff1d1b 100644
--- a/src/Access/MemoryAccessStorage.h
+++ b/src/Access/MemoryAccessStorage.h
@@ -51,7 +51,7 @@ private:
     void setAllNoLock(const std::vector<std::pair<UUID, AccessEntityPtr>> & all_entities, Notifications & notifications);
     void prepareNotifications(const Entry & entry, bool remove, Notifications & notifications) const;
 
-    mutable std::mutex mutex;
+    mutable std::recursive_mutex mutex;
    std::unordered_map<UUID, Entry> entries_by_id; /// We want to search entries both by ID and by the pair of name and type.
    std::unordered_map<String, Entry *> entries_by_name_and_type[static_cast<size_t>(EntityType::MAX)];
    mutable std::list<OnChangedHandler> handlers_by_type[static_cast<size_t>(EntityType::MAX)];
diff --git a/src/Access/Quota.h b/src/Access/Quota.h
index 430bdca29b0..b7970b2583b 100644
--- a/src/Access/Quota.h
+++ b/src/Access/Quota.h
@@ -45,7 +45,7 @@ struct Quota : public IAccessEntity
 
     struct ResourceTypeInfo
     {
-        const char * const raw_name;
+        const char * const raw_name = "";
         const String name;    /// Lowercased with underscores, e.g. "result_rows".
         const String keyword; /// Uppercased with spaces, e.g. "RESULT ROWS".
         const bool output_as_float = false;
diff --git a/src/Access/RolesOrUsersSet.h b/src/Access/RolesOrUsersSet.h
index 0d8983c2ec3..871bb0c0758 100644
--- a/src/Access/RolesOrUsersSet.h
+++ b/src/Access/RolesOrUsersSet.h
@@ -1,6 +1,7 @@
 #pragma once
 
 #include
+#include
 #include
 #include
 #include
diff --git a/src/Access/RowPolicy.h b/src/Access/RowPolicy.h
index c9b4d69152d..723db545dbe 100644
--- a/src/Access/RowPolicy.h
+++ b/src/Access/RowPolicy.h
@@ -2,6 +2,7 @@
 
 #include
 #include
+#include
 #include
 
diff --git a/src/Access/ya.make.in b/src/Access/ya.make.in
index 0c5692a9bfa..1f11c7d7d2a 100644
--- a/src/Access/ya.make.in
+++ b/src/Access/ya.make.in
@@ -8,7 +8,7 @@ PEERDIR(
 
 SRCS(
-
+
 )
 
 END()
diff --git a/src/AggregateFunctions/AggregateFunctionAggThrow.cpp b/src/AggregateFunctions/AggregateFunctionAggThrow.cpp
index c699dd4f217..c9d292f1993 100644
--- a/src/AggregateFunctions/AggregateFunctionAggThrow.cpp
+++ b/src/AggregateFunctions/AggregateFunctionAggThrow.cpp
@@ -11,6 +11,7 @@
 
 namespace DB
 {
+struct Settings;
 
 namespace ErrorCodes
 {
@@ -60,6 +61,8 @@ public:
         return std::make_shared<DataTypeUInt8>();
     }
 
+    bool allocatesMemoryInArena() const override { return false; }
+
     void create(AggregateDataPtr __restrict place) const override
     {
         if (std::uniform_real_distribution<>(0.0, 1.0)(thread_local_rng) <= throw_probability)
@@ -103,7 +106,7 @@ public:
 
 void registerAggregateFunctionAggThrow(AggregateFunctionFactory & factory)
 {
-    factory.registerFunction("aggThrow", [](const std::string & name, const DataTypes & argument_types, const Array & parameters)
+    factory.registerFunction("aggThrow", [](const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
     {
         Float64 throw_probability = 1.0;
         if (parameters.size() == 1)
diff --git a/src/AggregateFunctions/AggregateFunctionAny.cpp b/src/AggregateFunctions/AggregateFunctionAny.cpp
index 8b18abae884..9bc6e6af14f 100644
--- a/src/AggregateFunctions/AggregateFunctionAny.cpp
+++ b/src/AggregateFunctions/AggregateFunctionAny.cpp
@@ -1,28 +1,27 @@
 #include
 #include
-#include
-#include "registerAggregateFunctions.h"
 
 namespace DB
 {
+struct Settings;
 
 namespace
 {
 
-AggregateFunctionPtr createAggregateFunctionAny(const std::string & name, const DataTypes & argument_types, const Array & parameters)
+AggregateFunctionPtr createAggregateFunctionAny(const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings * settings)
 {
-    return AggregateFunctionPtr(createAggregateFunctionSingleValue(name, argument_types, parameters));
+    return AggregateFunctionPtr(createAggregateFunctionSingleValue(name, argument_types, parameters, settings));
 }
 
-AggregateFunctionPtr createAggregateFunctionAnyLast(const std::string & name, const DataTypes & argument_types, const Array & parameters)
+AggregateFunctionPtr createAggregateFunctionAnyLast(const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings * settings)
 {
-    return AggregateFunctionPtr(createAggregateFunctionSingleValue(name, argument_types, parameters));
+    return AggregateFunctionPtr(createAggregateFunctionSingleValue(name, argument_types, parameters, settings));
 }
 
-AggregateFunctionPtr createAggregateFunctionAnyHeavy(const std::string & name, const DataTypes & argument_types, const Array & parameters)
+AggregateFunctionPtr createAggregateFunctionAnyHeavy(const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings * settings)
 {
-    return AggregateFunctionPtr(createAggregateFunctionSingleValue(name, argument_types, parameters));
+    return AggregateFunctionPtr(createAggregateFunctionSingleValue(name, argument_types, parameters, settings));
 }
 
 }
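// Illustrative sketch (not from this diff): the mechanical change repeated across
// the AggregateFunctions files is a trailing `const Settings *` on every factory
// creator, plus a `struct Settings;` forward declaration. The function name
// "exampleAgg" is invented; the lambda shape mirrors the aggThrow change above.
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <AggregateFunctions/FactoryHelpers.h>

namespace DB
{
struct Settings;

void registerAggregateFunctionExample(AggregateFunctionFactory & factory)
{
    factory.registerFunction("exampleAgg",
        [](const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings * /* settings */)
        {
            assertNoParameters(name, parameters);
            assertUnary(name, argument_types);
            return AggregateFunctionPtr{}; // a real creator would construct the function here
        });
}
}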
diff --git a/src/AggregateFunctions/AggregateFunctionArgMinMax.h b/src/AggregateFunctions/AggregateFunctionArgMinMax.h
index 9efc907aed3..335ee7c8ecb 100644
--- a/src/AggregateFunctions/AggregateFunctionArgMinMax.h
+++ b/src/AggregateFunctions/AggregateFunctionArgMinMax.h
@@ -1,16 +1,15 @@
 #pragma once
 
-#include // SingleValueDataString used in embedded compiler
-#include
-#include
-#include
-#include
 #include
-#include "Columns/IColumn.h"
+#include
+#include
+#include // SingleValueDataString used in embedded compiler
 
 namespace DB
 {
+struct Settings;
+
 namespace ErrorCodes
 {
     extern const int ILLEGAL_TYPE_OF_ARGUMENT;
@@ -24,53 +23,47 @@ struct AggregateFunctionArgMinMaxData
     using ResultData_t = ResultData;
     using ValueData_t = ValueData;
 
-    ResultData result; // the argument at which the minimum/maximum value is reached.
-    ValueData value; // value for which the minimum/maximum is calculated.
+    ResultData result; // the argument at which the minimum/maximum value is reached.
+    ValueData value;   // value for which the minimum/maximum is calculated.
 
-    static bool allocatesMemoryInArena() { return ResultData::allocatesMemoryInArena() || ValueData::allocatesMemoryInArena(); }
-
-    static String name() { return StringRef(ValueData_t::name()) == StringRef("min") ? "argMin" : "argMax"; }
+    static bool allocatesMemoryInArena()
+    {
+        return ResultData::allocatesMemoryInArena() || ValueData::allocatesMemoryInArena();
+    }
 };
 
 /// Returns the first arg value found for the minimum/maximum value. Example: argMax(arg, value).
 template <typename Data>
-class AggregateFunctionArgMinMax final : public IAggregateFunctionTupleArgHelper<Data, AggregateFunctionArgMinMax<Data>, 2>
+class AggregateFunctionArgMinMax final : public IAggregateFunctionDataHelper<Data, AggregateFunctionArgMinMax<Data>>
 {
 private:
     const DataTypePtr & type_res;
     const DataTypePtr & type_val;
     const SerializationPtr serialization_res;
     const SerializationPtr serialization_val;
-    bool tuple_argument;
 
-    using Base = IAggregateFunctionTupleArgHelper<Data, AggregateFunctionArgMinMax<Data>, 2>;
+    using Base = IAggregateFunctionDataHelper<Data, AggregateFunctionArgMinMax<Data>>;
 
 public:
-    AggregateFunctionArgMinMax(const DataTypePtr & type_res_, const DataTypePtr & type_val_, const bool tuple_argument_)
-        : Base({type_res_, type_val_}, {}, tuple_argument_)
+    AggregateFunctionArgMinMax(const DataTypePtr & type_res_, const DataTypePtr & type_val_)
+        : Base({type_res_, type_val_}, {})
         , type_res(this->argument_types[0])
         , type_val(this->argument_types[1])
         , serialization_res(type_res->getDefaultSerialization())
         , serialization_val(type_val->getDefaultSerialization())
     {
         if (!type_val->isComparable())
-            throw Exception(
-                "Illegal type " + type_val->getName() + " of second argument of aggregate function " + getName()
-                + " because the values of that data type are not comparable",
-                ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
-
-        this->tuple_argument = tuple_argument_;
+            throw Exception("Illegal type " + type_val->getName() + " of second argument of aggregate function " + getName()
+                + " because the values of that data type are not comparable", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
     }
 
-    String getName() const override { return Data::name(); }
+    String getName() const override
+    {
+        return StringRef(Data::ValueData_t::name()) == StringRef("min") ? "argMin" : "argMax";
+    }
 
     DataTypePtr getReturnType() const override
     {
-        if (tuple_argument)
-        {
-            return std::make_shared<DataTypeTuple>(DataTypes{this->type_res, this->type_val});
-        }
-
         return type_res;
     }
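// Illustrative sketch (not from this diff): why AggregateFunctionArgMinMaxData
// ORs the two flags. The state holds two single-value slots, and it must live in
// an Arena if *either* slot may hold arena-allocated data (e.g. strings). The
// slot types below are invented; the real ones are the SingleValueData* classes.
struct PodSlotSketch    { static constexpr bool allocatesMemoryInArena() { return false; } };
struct StringSlotSketch { static constexpr bool allocatesMemoryInArena() { return true; } };

template <typename ResultData, typename ValueData>
struct ArgMinMaxDataSketch
{
    ResultData result; // the arg
    ValueData value;   // the ordering value

    static constexpr bool allocatesMemoryInArena()
    {
        return ResultData::allocatesMemoryInArena() || ValueData::allocatesMemoryInArena();
    }
};

// argMax(some_string, some_number) needs an arena for the result slot alone:
static_assert(ArgMinMaxDataSketch<StringSlotSketch, PodSlotSketch>::allocatesMemoryInArena());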
"argMin" : "argMax"; + } DataTypePtr getReturnType() const override { - if (tuple_argument) - { - return std::make_shared(DataTypes{this->type_res, this->type_val}); - } - return type_res; } @@ -98,21 +91,15 @@ public: this->data(place).value.read(buf, *serialization_val, arena); } - bool allocatesMemoryInArena() const override { return Data::allocatesMemoryInArena(); } + bool allocatesMemoryInArena() const override + { + return Data::allocatesMemoryInArena(); + } void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override { - if (tuple_argument) - { - auto & tup = assert_cast(to); - - this->data(place).result.insertResultInto(tup.getColumn(0)); - this->data(place).value.insertResultInto(tup.getColumn(1)); - } - else - this->data(place).result.insertResultInto(to); + this->data(place).result.insertResultInto(to); } }; - } diff --git a/src/AggregateFunctions/AggregateFunctionArray.cpp b/src/AggregateFunctions/AggregateFunctionArray.cpp index d0f17da5aa4..3eddbbb3fb2 100644 --- a/src/AggregateFunctions/AggregateFunctionArray.cpp +++ b/src/AggregateFunctions/AggregateFunctionArray.cpp @@ -5,6 +5,7 @@ namespace DB { +struct Settings; namespace ErrorCodes { diff --git a/src/AggregateFunctions/AggregateFunctionArray.h b/src/AggregateFunctions/AggregateFunctionArray.h index ef16fcde87b..f1005e2e43a 100644 --- a/src/AggregateFunctions/AggregateFunctionArray.h +++ b/src/AggregateFunctions/AggregateFunctionArray.h @@ -9,6 +9,7 @@ namespace DB { +struct Settings; namespace ErrorCodes { diff --git a/src/AggregateFunctions/AggregateFunctionAvg.cpp b/src/AggregateFunctions/AggregateFunctionAvg.cpp index 9b1c3d6cef6..a96c8c01407 100644 --- a/src/AggregateFunctions/AggregateFunctionAvg.cpp +++ b/src/AggregateFunctions/AggregateFunctionAvg.cpp @@ -3,10 +3,11 @@ #include #include #include -#include "registerAggregateFunctions.h" namespace DB { +struct Settings; + namespace ErrorCodes { extern const int ILLEGAL_TYPE_OF_ARGUMENT; @@ -20,7 +21,7 @@ bool allowType(const DataTypePtr& type) noexcept return t.isInt() || t.isUInt() || t.isFloat() || t.isDecimal(); } -AggregateFunctionPtr createAggregateFunctionAvg(const std::string & name, const DataTypes & argument_types, const Array & parameters) +AggregateFunctionPtr createAggregateFunctionAvg(const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *) { assertNoParameters(name, parameters); assertUnary(name, argument_types); diff --git a/src/AggregateFunctions/AggregateFunctionAvg.h b/src/AggregateFunctions/AggregateFunctionAvg.h index 7bf742294b4..7cdef3bfe69 100644 --- a/src/AggregateFunctions/AggregateFunctionAvg.h +++ b/src/AggregateFunctions/AggregateFunctionAvg.h @@ -7,11 +7,12 @@ #include #include #include -#include "Core/DecimalFunctions.h" +#include namespace DB { +struct Settings; template using DecimalOrVectorCol = std::conditional_t, ColumnDecimal, ColumnVector>; @@ -96,7 +97,9 @@ public: UInt32 num_scale_ = 0, UInt32 denom_scale_ = 0) : Base(argument_types_, {}), num_scale(num_scale_), denom_scale(denom_scale_) {} - DataTypePtr getReturnType() const final { return std::make_shared>(); } + DataTypePtr getReturnType() const override { return std::make_shared>(); } + + bool allocatesMemoryInArena() const override { return false; } void NO_SANITIZE_UNDEFINED merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override { diff --git a/src/AggregateFunctions/AggregateFunctionAvgWeighted.cpp b/src/AggregateFunctions/AggregateFunctionAvgWeighted.cpp 
diff --git a/src/AggregateFunctions/AggregateFunctionAvgWeighted.cpp b/src/AggregateFunctions/AggregateFunctionAvgWeighted.cpp
index 983b3bf3d4c..b7fdb3460e3 100644
--- a/src/AggregateFunctions/AggregateFunctionAvgWeighted.cpp
+++ b/src/AggregateFunctions/AggregateFunctionAvgWeighted.cpp
@@ -4,10 +4,11 @@
 #include
 #include
 #include
-#include "registerAggregateFunctions.h"
 
 namespace DB
 {
+struct Settings;
+
 namespace ErrorCodes
 {
     extern const int ILLEGAL_TYPE_OF_ARGUMENT;
@@ -60,7 +61,8 @@ static IAggregateFunction * create(const IDataType & first_type, const IDataType
 #undef LINE
 }
 
-AggregateFunctionPtr createAggregateFunctionAvgWeighted(const std::string & name, const DataTypes & argument_types, const Array & parameters)
+AggregateFunctionPtr
+createAggregateFunctionAvgWeighted(const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
 {
     assertNoParameters(name, parameters);
     assertBinary(name, argument_types);
diff --git a/src/AggregateFunctions/AggregateFunctionAvgWeighted.h b/src/AggregateFunctions/AggregateFunctionAvgWeighted.h
index f8b452fc444..5842e7311e9 100644
--- a/src/AggregateFunctions/AggregateFunctionAvgWeighted.h
+++ b/src/AggregateFunctions/AggregateFunctionAvgWeighted.h
@@ -5,18 +5,20 @@
 
 namespace DB
 {
-template <class T>
+struct Settings;
+
+template <typename T>
 using AvgWeightedFieldType = std::conditional_t<IsDecimalNumber<T>,
     std::conditional_t<std::is_same_v<T, Decimal256>, Decimal256, Decimal128>,
     std::conditional_t<DecimalOrExtendedInt<T>,
         Float64, // no way to do UInt128 * UInt128, better cast to Float64
         NearestFieldType<T>>>;
 
-template <class U, class V>
+template <typename U, typename V>
 using MaxFieldType = std::conditional_t<(sizeof(AvgWeightedFieldType<U>) > sizeof(AvgWeightedFieldType<V>)),
     AvgWeightedFieldType<U>, AvgWeightedFieldType<V>>;
 
-template <class Value, class Weight>
+template <typename Value, typename Weight>
 class AggregateFunctionAvgWeighted final :
     public AggregateFunctionAvgBase<
         MaxFieldType<Value, Weight>, AvgWeightedFieldType<Weight>, AggregateFunctionAvgWeighted<Value, Weight>>
diff --git a/src/AggregateFunctions/AggregateFunctionBitwise.cpp b/src/AggregateFunctions/AggregateFunctionBitwise.cpp
index cfcef81243a..320231e09ab 100644
--- a/src/AggregateFunctions/AggregateFunctionBitwise.cpp
+++ b/src/AggregateFunctions/AggregateFunctionBitwise.cpp
@@ -7,6 +7,8 @@
 
 namespace DB
 {
+struct Settings;
+
 namespace ErrorCodes
 {
     extern const int ILLEGAL_TYPE_OF_ARGUMENT;
@@ -16,7 +18,7 @@
 namespace
 {
 
 template